diff --git a/push-validator-manager/README.md b/push-validator-manager/README.md
new file mode 100644
index 00000000..7c276fb8
--- /dev/null
+++ b/push-validator-manager/README.md
@@ -0,0 +1,129 @@
+# Push Validator Manager
+
+**Fast validator setup for Push Chain**
+
+## 🚀 Quick Start (1-2 minutes)
+
+### Step 1: Install & Start
+```bash
+curl -fsSL https://get.push.network/node/install.sh | bash
+```
+Automatically installs and starts your validator using state sync (no full sync needed).
+
+> **Note:** Restart terminal or run `source ~/.bashrc` to use `push-validator` from anywhere.
+
+### Step 2: Verify Sync
+```bash
+push-validator status
+```
+Wait for: `✅ Catching Up: false` (takes ~1-2 minutes with state sync)
+
+### Step 3: Register Validator
+```bash
+push-validator register-validator
+```
+**Requirements:** 2+ PC tokens from [faucet](https://faucet.push.org)
+
+**Done! Your validator is running with automatic recovery enabled! 🎉**
+
+## 📊 Dashboard
+
+Monitor your validator in real-time with an interactive dashboard:
+
+```bash
+push-validator dashboard
+```
+
+**Features:**
+- **Node Status** - Process state, RPC connectivity, resource usage (CPU, memory, disk)
+- **Chain Sync** - Real-time block height, sync progress with ETA, network latency
+- **Validator Metrics** - Bonding status, voting power, commission rate, accumulated rewards
+- **Network Overview** - Connected peers, chain ID, active validators list
+- **Live Logs** - Stream node activity with search and filtering
+- **Auto-Refresh** - Updates every 2 seconds for real-time monitoring
+
+The dashboard provides everything you need to monitor validator health and performance at a glance.
+
+## 📋 Commands
+
+### Core
+```bash
+push-validator start # Start with state sync (2-3 min)
+push-validator stop # Stop node
+push-validator status # Check sync & validator status
+push-validator dashboard # Live interactive monitoring dashboard
+push-validator register-validator # Register as validator
+push-validator logs # View logs
+```
+
+### Validator Operations
+```bash
+push-validator increase-stake # Increase validator stake and voting power
+push-validator unjail # Restore jailed validator to active status
+push-validator withdraw-rewards # Withdraw validator rewards and commission
+push-validator restake # Auto-withdraw and restake all rewards to increase validator power
+```
+
+### Monitoring
+```bash
+push-validator sync # Monitor sync progress
+push-validator peers # Show peer connections (from local RPC)
+push-validator doctor # Run diagnostic checks on validator setup
+```
+
+### Management
+```bash
+push-validator restart # Restart node
+push-validator validators # List validators (supports --output json)
+push-validator balance # Check balance (defaults to validator key)
+push-validator reset # Reset chain data (keeps address book)
+push-validator full-reset # ⚠️ Complete reset (deletes ALL keys and data)
+push-validator backup # Backup config and validator state
+```
+
+## ⚡ Features
+
+- **State Sync**: 1-2 minute setup (no full blockchain download)
+- **Interactive Logs**: Real-time log viewer with search and filtering
+- **Smart Detection**: Monitors for sync stalls and network issues
+- **Reliable Snapshots**: Uses trusted RPC nodes for recovery
+- **Multiple Outputs**: JSON, YAML, or text format support
+
+## 🌐 Network
+
+- **Chain**: `push_42101-1` (Testnet)
+- **Min Stake**: 1.5 PC
+- **Faucet**: https://faucet.push.org
+- **Explorer**: https://donut.push.network
+
+
+## 🔧 Advanced Setup (Optional)
+
+### Setup NGINX with SSL
+```bash
+bash scripts/setup-nginx.sh yourdomain.com
+```
+**Creates:**
+- `https://yourdomain.com` - Cosmos RPC endpoint
+- `https://evm.yourdomain.com` - EVM RPC endpoint
+- Automatic SSL certificates via Let's Encrypt
+- Rate limiting and security headers
+
+**Requirements:**
+- Domain pointing to your server IP
+- Ports 80/443 open
+- Ubuntu/Debian system
+
+### Log Rotation
+```bash
+bash scripts/setup-log-rotation.sh
+```
+Configures daily rotation with 14-day retention and compression.
+
+### File Locations
+- **Manager**: `~/.local/bin/push-validator`
+- **Binary**: `~/.local/bin/pchaind`
+- **Config**: `~/.pchain/config/`
+- **Data**: `~/.pchain/data/`
+- **Logs**: `~/.pchain/logs/pchaind.log`
+- **Backups**: `~/push-node-backups/`
diff --git a/push-validator-manager/cmd/push-validator/cmd_backup.go b/push-validator-manager/cmd/push-validator/cmd_backup.go
new file mode 100644
index 00000000..bf2eb9a0
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_backup.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/admin"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+)
+
+// handleBackup creates a backup archive of the node configuration and
+// prints the resulting path, or a JSON object when --output=json.
+func handleBackup(cfg config.Config) error {
+ path, err := admin.Backup(admin.BackupOptions{HomeDir: cfg.HomeDir})
+ if err != nil {
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()}) } else { getPrinter().Error(fmt.Sprintf("backup error: %v", err)) }
+ return err
+ }
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": true, "backup_path": path}) } else { getPrinter().Success(fmt.Sprintf("backup created: %s", path)) }
+ return nil
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_balance.go b/push-validator-manager/cmd/push-validator/cmd_balance.go
new file mode 100644
index 00000000..980e4dc5
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_balance.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
+// handleBalance prints an account balance. It resolves the address from
+// either a positional argument or KEY_NAME when --address/arg is omitted.
+// When --output=json is set, it emits a structured object.
+func handleBalance(cfg config.Config, args []string) error {
+ var addr string
+ if len(args) > 0 { addr = args[0] }
+ if addr == "" {
+ key := os.Getenv("KEY_NAME")
+ if key == "" {
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": false, "error": "address not provided; set KEY_NAME or pass --address"}) } else { fmt.Println("usage: push-validator balance <address> (or set KEY_NAME)") }
+ return fmt.Errorf("address not provided")
+ }
+ out, err := exec.Command(findPchaind(), "keys", "show", key, "-a", "--keyring-backend", cfg.KeyringBackend, "--home", cfg.HomeDir).Output()
+ if err != nil {
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()}) } else { fmt.Printf("resolve address error: %v\n", err) }
+ return fmt.Errorf("resolve address: %w", err)
+ }
+ addr = strings.TrimSpace(string(out))
+ }
+
+ // Convert hex address (0x...) to bech32 if needed
+ if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") {
+ bech32Addr, convErr := hexToBech32Address(addr)
+ if convErr != nil {
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": false, "error": convErr.Error(), "address": addr}) } else { getPrinter().Error(fmt.Sprintf("address conversion error: %v", convErr)) }
+ return convErr
+ }
+ addr = bech32Addr
+ }
+
+ v := validator.NewWith(validator.Options{BinPath: findPchaind(), HomeDir: cfg.HomeDir, ChainID: cfg.ChainID, Keyring: cfg.KeyringBackend, GenesisDomain: cfg.GenesisDomain, Denom: cfg.Denom})
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ bal, err := v.Balance(ctx, addr)
+ if err != nil {
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": false, "error": err.Error(), "address": addr}) } else { getPrinter().Error(fmt.Sprintf("balance error: %v", err)) }
+ return err
+ }
+ if flagOutput == "json" { getPrinter().JSON(map[string]any{"ok": true, "address": addr, "balance": bal, "denom": cfg.Denom}) } else { getPrinter().Info(fmt.Sprintf("%s %s", bal, cfg.Denom)) }
+ return nil
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_dashboard.go b/push-validator-manager/cmd/push-validator/cmd_dashboard.go
new file mode 100644
index 00000000..aa09da4f
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_dashboard.go
@@ -0,0 +1,157 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/dashboard"
+)
+
+// dashboardCmd provides an interactive TUI dashboard for monitoring validator status
+func createDashboardCmd() *cobra.Command {
+ var (
+ refreshInterval time.Duration
+ rpcTimeout time.Duration
+ debugMode bool
+ )
+
+ cmd := &cobra.Command{
+ Use: "dashboard",
+ Short: "Interactive dashboard for monitoring validator status",
+ Long: `Launch an interactive terminal dashboard showing real-time validator metrics:
+
+ • Node process status (running/stopped, PID, version)
+ • Chain sync progress with ETA calculation
+ • Network connectivity (peers, latency)
+ • Validator consensus power and status
+
+The dashboard auto-refreshes every 2 seconds by default. Press '?' for help.
+
+For non-interactive environments (CI/pipes), dashboard automatically falls back
+to a static text snapshot.`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // Load configuration
+ cfg := loadCfg()
+
+ // Build dashboard options
+ opts := dashboard.Options{
+ Config: cfg,
+ RefreshInterval: refreshInterval,
+ RPCTimeout: rpcTimeout,
+ NoColor: flagNoColor,
+ NoEmoji: flagNoEmoji,
+ Debug: debugMode,
+ }
+ opts = normalizeDashboardOptions(opts)
+
+ // Check if we're in a TTY environment
+ isTTY := term.IsTerminal(int(os.Stdout.Fd()))
+
+ if !isTTY {
+ // Non-TTY environment (CI/pipes): use static mode
+ if debugMode {
+ fmt.Fprintln(os.Stderr, "Debug: Non-TTY detected, using static mode")
+ }
+ return runDashboardStatic(cmd.Context(), opts)
+ }
+
+ // TTY environment: use interactive Bubble Tea dashboard
+ if debugMode {
+ fmt.Fprintln(os.Stderr, "Debug: TTY detected, using interactive mode")
+ }
+ return runDashboardInteractive(opts)
+ },
+ }
+
+ // Flags
+ cmd.Flags().DurationVar(&refreshInterval, "refresh-interval", 2*time.Second, "Dashboard refresh interval")
+ cmd.Flags().DurationVar(&rpcTimeout, "rpc-timeout", 15*time.Second, "RPC request timeout")
+ cmd.Flags().BoolVar(&debugMode, "debug", false, "Enable debug mode for troubleshooting")
+
+ return cmd
+}
+
+// runDashboardStatic performs a single fetch and prints static output for non-TTY
+func runDashboardStatic(ctx context.Context, opts dashboard.Options) error {
+ // Print debug info BEFORE dashboard output
+ if opts.Debug {
+ fmt.Fprintln(os.Stderr, "Debug: Starting dashboard...")
+ fmt.Fprintf(os.Stderr, "Debug: Config loaded - HomeDir: %s, RPC: %s\n", opts.Config.HomeDir, opts.Config.RPCLocal)
+ fmt.Fprintln(os.Stderr, "Debug: Running in static mode")
+ fmt.Fprintln(os.Stderr, "---") // Separator
+ }
+
+ d := dashboard.New(opts)
+
+ // Apply RPC timeout to context (prevents hung RPCs in CI/pipes)
+ ctx, cancel := context.WithTimeout(ctx, opts.RPCTimeout)
+ defer cancel()
+
+ // Fetch data once with timeout
+ data, err := d.FetchDataOnce(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to fetch dashboard data: %w", err)
+ }
+
+ // Render static text snapshot to stdout
+ fmt.Print(d.RenderStatic(data))
+ return nil
+}
+
+// runDashboardInteractive launches the Bubble Tea TUI program
+func runDashboardInteractive(opts dashboard.Options) error {
+ d := dashboard.New(opts)
+ if d == nil {
+ return fmt.Errorf("failed to create dashboard instance")
+ }
+
+ // Create Bubble Tea program with proper TTY configuration
+ // Key fix: Use stdin/stdout explicitly instead of /dev/tty
+ p := tea.NewProgram(
+ d,
+ tea.WithAltScreen(), // Use alternate screen buffer (clean display)
+ tea.WithInput(os.Stdin), // Use stdin instead of trying to open /dev/tty
+ tea.WithOutput(os.Stdout), // Use stdout instead of trying to open /dev/tty
+ )
+
+ // Run program - blocks until quit
+ if _, err := p.Run(); err != nil {
+ // If TTY error, fall back to static mode
+ if strings.Contains(err.Error(), "tty") || strings.Contains(err.Error(), "device not configured") {
+ if opts.Debug {
+ fmt.Fprintf(os.Stderr, "Debug: TTY error, falling back to static mode: %v\n", err)
+ }
+ return runDashboardStatic(context.Background(), opts)
+ }
+ return fmt.Errorf("dashboard error: %w", err)
+ }
+
+ return nil
+}
+
+// normalizeDashboardOptions applies default refresh/timeout values to keep behaviour
+// consistent between interactive and static dashboard modes.
+func normalizeDashboardOptions(opts dashboard.Options) dashboard.Options {
+ if opts.RefreshInterval <= 0 {
+ opts.RefreshInterval = 2 * time.Second
+ }
+ if opts.RPCTimeout <= 0 {
+ // Default to 15s but cap at twice the refresh interval so the UI remains responsive.
+ timeout := 15 * time.Second
+ if opts.RefreshInterval > 0 {
+ candidate := 2 * opts.RefreshInterval
+ if candidate < timeout {
+ timeout = candidate
+ }
+ }
+ opts.RPCTimeout = timeout
+ }
+ return opts
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_doctor.go b/push-validator-manager/cmd/push-validator/cmd_doctor.go
new file mode 100644
index 00000000..b4574857
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_doctor.go
@@ -0,0 +1,362 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/exitcodes"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/spf13/cobra"
+)
+
+var doctorCmd = &cobra.Command{
+ Use: "doctor",
+ Short: "Run diagnostic checks on validator setup",
+ Long: `Performs comprehensive health checks on your validator setup including:
+- Process status and accessibility
+- Configuration file validity
+- Network connectivity (RPC, P2P, remote endpoints)
+- Disk space and permissions
+- Common configuration issues`,
+ SilenceUsage: true,
+ SilenceErrors: true,
+ RunE: runDoctor,
+}
+
+type checkResult struct {
+ Name string
+ Status string // "pass", "warn", "fail"
+ Message string
+ Details []string
+}
+
+func runDoctor(cmd *cobra.Command, args []string) error {
+ cfg := config.Load()
+ if flagHome != "" {
+ cfg.HomeDir = flagHome
+ }
+
+ c := ui.NewColorConfigFromGlobal()
+ results := []checkResult{}
+
+ // Header
+ fmt.Println(c.Header(" VALIDATOR HEALTH CHECK "))
+ fmt.Println()
+
+ // Run all diagnostic checks
+ results = append(results, checkProcessRunning(cfg, c))
+ results = append(results, checkRPCAccessible(cfg, c))
+ results = append(results, checkConfigFiles(cfg, c))
+ results = append(results, checkP2PPeers(cfg, c))
+ results = append(results, checkRemoteConnectivity(cfg, c))
+ results = append(results, checkDiskSpace(cfg, c))
+ results = append(results, checkPermissions(cfg, c))
+ results = append(results, checkSyncStatus(cfg, c))
+
+ // Summary
+ fmt.Println()
+ fmt.Println(c.Separator(60))
+
+ passed := 0
+ warned := 0
+ failed := 0
+
+ for _, r := range results {
+ switch r.Status {
+ case "pass":
+ passed++
+ case "warn":
+ warned++
+ case "fail":
+ failed++
+ }
+ }
+
+ summary := fmt.Sprintf("Checks: %d passed, %d warnings, %d failed", passed, warned, failed)
+ if failed > 0 {
+ fmt.Println(c.Error("✗ " + summary))
+ return exitcodes.ValidationErr("")
+ } else if warned > 0 {
+ fmt.Println(c.Warning("⚠ " + summary))
+ } else {
+ fmt.Println(c.Success("✓ " + summary))
+ }
+
+ return nil
+}
+
+func checkProcessRunning(cfg config.Config, c *ui.ColorConfig) checkResult {
+ sup := process.New(cfg.HomeDir)
+ running := sup.IsRunning()
+
+ result := checkResult{Name: "Process Status"}
+
+ if running {
+ if pid, ok := sup.PID(); ok {
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("Validator process running (PID %d)", pid)
+ } else {
+ result.Status = "pass"
+ result.Message = "Validator process running"
+ }
+ } else {
+ result.Status = "fail"
+ result.Message = "Validator process not running"
+ result.Details = []string{"Run 'push-validator start' to start the node"}
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkRPCAccessible(cfg config.Config, c *ui.ColorConfig) checkResult {
+ rpc := cfg.RPCLocal
+ if rpc == "" {
+ rpc = "http://127.0.0.1:26657"
+ }
+
+ result := checkResult{Name: "RPC Accessibility"}
+
+ hostport := "127.0.0.1:26657"
+ if u, err := url.Parse(rpc); err == nil && u.Host != "" {
+ hostport = u.Host
+ }
+
+ if process.IsRPCListening(hostport, 500*time.Millisecond) {
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("RPC listening on %s", hostport)
+ } else {
+ result.Status = "fail"
+ result.Message = fmt.Sprintf("RPC not accessible at %s", hostport)
+ result.Details = []string{
+ "Check if the node is running",
+ "Verify firewall rules allow local connections",
+ "Check config.toml for correct RPC settings",
+ }
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkConfigFiles(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "Configuration Files"}
+
+ configPath := filepath.Join(cfg.HomeDir, "config", "config.toml")
+ genesisPath := filepath.Join(cfg.HomeDir, "config", "genesis.json")
+
+ missing := []string{}
+ if _, err := os.Stat(configPath); os.IsNotExist(err) {
+ missing = append(missing, "config.toml")
+ }
+ if _, err := os.Stat(genesisPath); os.IsNotExist(err) {
+ missing = append(missing, "genesis.json")
+ }
+
+ if len(missing) > 0 {
+ result.Status = "fail"
+ result.Message = fmt.Sprintf("Missing configuration files: %s", strings.Join(missing, ", "))
+ result.Details = []string{"Run 'push-validator init' to initialize configuration"}
+ } else {
+ result.Status = "pass"
+ result.Message = "All required configuration files present"
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkP2PPeers(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "P2P Network"}
+
+ rpc := cfg.RPCLocal
+ if rpc == "" {
+ rpc = "http://127.0.0.1:26657"
+ }
+
+ cli := node.New(rpc)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ peers, err := cli.Peers(ctx)
+ if err != nil {
+ result.Status = "warn"
+ result.Message = "Could not check peer connections"
+ result.Details = []string{fmt.Sprintf("RPC error: %v", err)}
+ } else if len(peers) == 0 {
+ result.Status = "fail"
+ result.Message = "No P2P peers connected"
+ result.Details = []string{
+ "Check persistent_peers in config.toml",
+ "Verify firewall allows port 26656",
+ "Check if seed nodes are reachable",
+ }
+ } else if len(peers) < 3 {
+ result.Status = "warn"
+ result.Message = fmt.Sprintf("Only %d peer(s) connected (recommend 3+)", len(peers))
+ } else {
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("%d peers connected", len(peers))
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkRemoteConnectivity(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "Remote Connectivity"}
+
+ remote := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
+
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ defer cancel()
+
+ cli := node.New(remote)
+ _, err := cli.Status(ctx)
+
+ if err != nil {
+ result.Status = "fail"
+ result.Message = fmt.Sprintf("Cannot reach %s", cfg.GenesisDomain)
+ result.Details = []string{
+ fmt.Sprintf("Error: %v", err),
+ "Check internet connectivity",
+ "Verify genesis domain is correct",
+ }
+ } else {
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("Remote RPC accessible at %s", cfg.GenesisDomain)
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkDiskSpace(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "Disk Space"}
+
+ dataDir := filepath.Join(cfg.HomeDir, "data")
+
+ // Try to get disk usage (cross-platform is tricky, simplified check)
+ stat, err := os.Stat(cfg.HomeDir)
+ if err != nil {
+ result.Status = "warn"
+ result.Message = "Could not check disk space"
+ result.Details = []string{fmt.Sprintf("Error: %v", err)}
+ } else if stat.IsDir() {
+ // Simple check: can we write a test file?
+ testFile := filepath.Join(cfg.HomeDir, ".diskcheck")
+ if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
+ result.Status = "fail"
+ result.Message = "Cannot write to data directory"
+ result.Details = []string{
+ fmt.Sprintf("Error: %v", err),
+ "Check disk space",
+ "Verify write permissions",
+ }
+ } else {
+ os.Remove(testFile)
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("Data directory writable at %s", dataDir)
+ }
+ } else {
+ result.Status = "fail"
+ result.Message = fmt.Sprintf("%s is not a directory", cfg.HomeDir)
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkPermissions(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "File Permissions"}
+
+ configPath := filepath.Join(cfg.HomeDir, "config", "config.toml")
+
+ info, err := os.Stat(configPath)
+ if err != nil {
+ result.Status = "warn"
+ result.Message = "Could not check file permissions"
+ result.Details = []string{fmt.Sprintf("Error: %v", err)}
+ } else {
+ mode := info.Mode()
+ // Check if world-readable (less strict than world-writable)
+ if mode.Perm()&0004 != 0 {
+ result.Status = "pass"
+ result.Message = "Configuration files have appropriate permissions"
+ } else {
+ result.Status = "warn"
+ result.Message = "Configuration files may have restrictive permissions"
+ result.Details = []string{fmt.Sprintf("config.toml has mode %o", mode.Perm())}
+ }
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func checkSyncStatus(cfg config.Config, c *ui.ColorConfig) checkResult {
+ result := checkResult{Name: "Sync Status"}
+
+ rpc := cfg.RPCLocal
+ if rpc == "" {
+ rpc = "http://127.0.0.1:26657"
+ }
+
+ cli := node.New(rpc)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+
+ status, err := cli.Status(ctx)
+ if err != nil {
+ result.Status = "warn"
+ result.Message = "Could not check sync status"
+ result.Details = []string{fmt.Sprintf("RPC error: %v", err)}
+ } else {
+ if status.CatchingUp {
+ result.Status = "warn"
+ result.Message = fmt.Sprintf("Node is syncing (height: %d)", status.Height)
+ result.Details = []string{"Wait for sync to complete before validating"}
+ } else {
+ result.Status = "pass"
+ result.Message = fmt.Sprintf("Node is synced (height: %d)", status.Height)
+ }
+ }
+
+ printCheck(result, c)
+ return result
+}
+
+func printCheck(r checkResult, c *ui.ColorConfig) {
+ icon := ""
+ msg := ""
+
+ switch r.Status {
+ case "pass":
+ icon = c.Success("✓")
+ msg = c.Success(r.Message)
+ case "warn":
+ icon = c.Warning("⚠ ")
+ msg = c.Warning(r.Message)
+ case "fail":
+ icon = c.Error("✗")
+ msg = c.Error(r.Message)
+ }
+
+ fmt.Printf("%s %s: %s\n", icon, c.Apply(c.Theme.Header, r.Name), msg)
+
+ for _, detail := range r.Details {
+ fmt.Printf(" %s %s\n", c.Apply(c.Theme.Pending, "→"), detail)
+ }
+}
+
+func init() {
+ rootCmd.AddCommand(doctorCmd)
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_increase_stake.go b/push-validator-manager/cmd/push-validator/cmd_increase_stake.go
new file mode 100644
index 00000000..fb6ed5b3
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_increase_stake.go
@@ -0,0 +1,286 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "math/big"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
+// handleIncreaseStake allows validators to increase their stake after registration
+func handleIncreaseStake(cfg config.Config) {
+ v := validator.NewWith(validator.Options{
+ BinPath: findPchaind(),
+ HomeDir: cfg.HomeDir,
+ ChainID: cfg.ChainID,
+ Keyring: cfg.KeyringBackend,
+ GenesisDomain: cfg.GenesisDomain,
+ Denom: cfg.Denom,
+ })
+
+ // Get validator info
+ valCtx, valCancel := context.WithTimeout(context.Background(), 20*time.Second)
+ myValInfo, valErr := validator.GetCachedMyValidator(valCtx, cfg)
+ valCancel()
+
+ if valErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": valErr.Error()})
+ } else {
+ fmt.Println()
+ fmt.Println(getPrinter().Colors.Error("⚠️ Failed to retrieve validator information"))
+ fmt.Printf("Error: %v\n\n", valErr)
+ fmt.Println(getPrinter().Colors.Info("Make sure you are registered as a validator first:"))
+ fmt.Println(getPrinter().Colors.Apply(getPrinter().Colors.Theme.Command, " push-validator register-validator"))
+ fmt.Println()
+ }
+ return
+ }
+
+ if !myValInfo.IsValidator {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "not a registered validator"})
+ } else {
+ fmt.Println()
+ fmt.Println(getPrinter().Colors.Error("❌ This node is not registered as a validator"))
+ fmt.Println()
+ fmt.Println(getPrinter().Colors.Info("To register, use:"))
+ fmt.Println(getPrinter().Colors.Apply(getPrinter().Colors.Theme.Command, " push-validator register-validator"))
+ fmt.Println()
+ }
+ return
+ }
+
+ // Display current validator info
+ p := getPrinter()
+ fmt.Println()
+ p.Section("Current Validator Status")
+ fmt.Println()
+ p.KeyValueLine("Validator Name", myValInfo.Moniker, "blue")
+ p.KeyValueLine("Address", myValInfo.Address, "dim")
+
+ // Get and display EVM address
+ evmAddr, evmErr := getEVMAddress(myValInfo.Address)
+ if evmErr == nil {
+ p.KeyValueLine("EVM Address", evmAddr, "dim")
+ }
+
+ // Display voting power (converted from int64 to PC)
+ votingPowerPC := float64(myValInfo.VotingPower) / 1e6 // Voting power is in units of 1e-6
+ p.KeyValueLine("Voting Power", fmt.Sprintf("%.6f", votingPowerPC)+" PC", "yellow")
+ fmt.Println()
+
+ // Convert validator operator address to account address
+ accountAddr, convErr := convertValidatorToAccountAddress(myValInfo.Address)
+ if convErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": convErr.Error()})
+ } else {
+ fmt.Println(p.Colors.Error("⚠️ Failed to convert validator address"))
+ fmt.Printf("Error: %v\n\n", convErr)
+ }
+ return
+ }
+
+ // Get account balance from Cosmos SDK
+ balCtx, balCancel := context.WithTimeout(context.Background(), 15*time.Second)
+ balance, balErr := v.Balance(balCtx, accountAddr)
+ balCancel()
+
+ if balErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": balErr.Error()})
+ } else {
+ fmt.Println(p.Colors.Error("⚠️ Failed to retrieve balance"))
+ fmt.Printf("Error: %v\n\n", balErr)
+ }
+ return
+ }
+
+ // Display balance info
+ const minDelegate = "100000000000000000" // 0.1 PC in wei
+ const feeReserve = "100000000000000000" // 0.1 PC in wei for gas fees
+
+ balInt := new(big.Int)
+ balInt.SetString(balance, 10)
+ feeInt := new(big.Int)
+ feeInt.SetString(feeReserve, 10)
+ maxDelegatable := new(big.Int).Sub(balInt, feeInt)
+
+ // Handle case where balance is less than fee
+ if maxDelegatable.Sign() < 0 {
+ maxDelegatable.SetInt64(0)
+ }
+
+ divisor := new(big.Float).SetFloat64(1e18)
+ balFloat, _ := new(big.Float).SetString(balance)
+ balPC := new(big.Float).Quo(balFloat, divisor)
+
+ maxDelegateFloat, _ := new(big.Float).SetString(maxDelegatable.String())
+ maxDelegatePC := new(big.Float).Quo(maxDelegateFloat, divisor)
+
+ p.Section("Account Balance")
+ fmt.Println()
+ p.KeyValueLine("Available Balance", fmt.Sprintf("%.6f", balPC)+" PC", "blue")
+ p.KeyValueLine("Available to Delegate", fmt.Sprintf("%.6f", maxDelegatePC)+" PC", "blue")
+ p.KeyValueLine("Reserved for Fees", "0.1 PC", "dim")
+ fmt.Println()
+
+ // Check if user has enough balance
+ if maxDelegatable.Sign() <= 0 {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "insufficient balance"})
+ } else {
+ fmt.Println(p.Colors.Error("❌ Insufficient balance to delegate"))
+ fmt.Println()
+ fmt.Println("You need at least 0.2 PC to increase stake (0.1 PC to delegate + 0.1 PC for fees).")
+ fmt.Println()
+ }
+ return
+ }
+
+ // Prompt for delegation amount
+ reader := bufio.NewReader(os.Stdin)
+ minDelegatePC := 0.1
+ maxDelegatePCVal, _ := strconv.ParseFloat(fmt.Sprintf("%.6f", maxDelegatePC), 64)
+
+ delegationAmount := ""
+ for {
+ fmt.Printf("Enter amount to delegate (%.1f - %.1f PC): ", minDelegatePC, maxDelegatePCVal)
+ input, _ := reader.ReadString('\n')
+ input = strings.TrimSpace(input)
+
+ if input == "" {
+ fmt.Println(p.Colors.Error("❌ Amount is required. Try again."))
+ continue
+ }
+
+ // Parse user input
+ delegateAmount, err := strconv.ParseFloat(input, 64)
+ if err != nil {
+ fmt.Println(p.Colors.Error("❌ Invalid amount. Enter a number. Try again."))
+ continue
+ }
+
+ // Validate bounds
+ if delegateAmount < minDelegatePC {
+ fmt.Printf(p.Colors.Error("❌ Amount too low. Minimum delegation is %.1f PC. Try again.\n"), minDelegatePC)
+ continue
+ }
+ if delegateAmount > maxDelegatePCVal {
+ fmt.Printf(p.Colors.Error("❌ Insufficient balance. Maximum: %.1f PC. Try again.\n"), maxDelegatePCVal)
+ continue
+ }
+
+ // Convert to wei
+ delegateWei := new(big.Float).Mul(new(big.Float).SetFloat64(delegateAmount), new(big.Float).SetFloat64(1e18))
+ delegationAmount = delegateWei.Text('f', 0)
+
+ fmt.Printf(p.Colors.Success("✅ Will delegate %.6f PC\n"), delegateAmount)
+ fmt.Println()
+ break
+ }
+
+ // Auto-derive key name from validator
+ defaultKeyName := getenvDefault("KEY_NAME", "validator-key")
+ var keyName string
+
+ // Try to auto-derive the key name from the validator's address
+ if myValInfo.Address != "" {
+ // We already have accountAddr from the balance check above, but need to recalculate
+ // in case that logic changes in the future
+ accountAddr, convErr := convertValidatorToAccountAddress(myValInfo.Address)
+ if convErr == nil {
+ // Try to find the key in the keyring
+ if foundKey, findErr := findKeyNameByAddress(cfg, accountAddr); findErr == nil {
+ keyName = foundKey
+ if flagOutput != "json" {
+ fmt.Println()
+ fmt.Printf("🔑 Using key: %s\n", keyName)
+ }
+ } else {
+ // Fall back to default if key not found
+ keyName = defaultKeyName
+ }
+ } else {
+ // Fall back to default if address conversion failed
+ keyName = defaultKeyName
+ }
+ } else {
+ keyName = defaultKeyName
+ }
+
+ if keyName == "" {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "could not determine key name"})
+ } else {
+ fmt.Println(p.Colors.Error("⚠️ Could not determine key name"))
+ fmt.Println()
+ }
+ return
+ }
+
+ // Execute delegation
+ fmt.Println(p.Colors.Info("Submitting delegation transaction..."))
+ fmt.Println()
+
+ delegCtx, delegCancel := context.WithTimeout(context.Background(), 90*time.Second)
+ txHash, delegErr := v.Delegate(delegCtx, validator.DelegateArgs{
+ ValidatorAddress: myValInfo.Address,
+ Amount: delegationAmount,
+ KeyName: keyName,
+ })
+ delegCancel()
+
+ if delegErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": delegErr.Error()})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("❌ Delegation failed"))
+ fmt.Printf("Error: %v\n\n", delegErr)
+ }
+ return
+ }
+
+ // Success output
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{
+ "ok": true,
+ "txhash": txHash,
+ "delegation_amount": delegationAmount,
+ })
+ } else {
+ fmt.Println()
+ p.Success("✅ Delegation successful!")
+ fmt.Println()
+
+ // Display delegation details
+ p.KeyValueLine("Transaction Hash", txHash, "green")
+
+ // Display delegation amount
+ delegateFloat, _ := new(big.Float).SetString(delegationAmount)
+ divisor := new(big.Float).SetFloat64(1e18)
+ delegatePC := new(big.Float).Quo(delegateFloat, divisor)
+ p.KeyValueLine("Amount Delegated", fmt.Sprintf("%.6f", delegatePC)+" PC", "yellow")
+ fmt.Println()
+
+ // Show helpful next steps
+ fmt.Println(p.Colors.SubHeader("Next Steps"))
+ fmt.Println(p.Colors.Separator(40))
+ fmt.Println()
+ fmt.Println(p.Colors.Info(" 1. Check updated validator status:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator validators"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info(" 2. View dashboard:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator dashboard"))
+ fmt.Println()
+ }
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_logs.go b/push-validator-manager/cmd/push-validator/cmd_logs.go
new file mode 100644
index 00000000..c8eb60fc
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_logs.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/term"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+)
+
+// handleLogs tails the node log file until interrupted. It validates
+// the log path and prints structured JSON errors when --output=json.
+func handleLogs(sup process.Supervisor) error {
+ lp := sup.LogPath()
+ if lp == "" {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "no log path configured"})
+ } else {
+ getPrinter().Error("no log path configured")
+ }
+ return fmt.Errorf("no log path configured")
+ }
+ if _, err := os.Stat(lp); err != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "log file not found", "path": lp})
+ } else {
+ getPrinter().Error(fmt.Sprintf("log file not found: %s", lp))
+ }
+ return fmt.Errorf("log file not found: %s", lp)
+ }
+ interactive := term.IsTerminal(int(os.Stdin.Fd())) && term.IsTerminal(int(os.Stdout.Fd())) && !flagNonInteractive
+ var tty *os.File
+ if !interactive && !flagNonInteractive {
+ if t, err := os.OpenFile("/dev/tty", os.O_RDWR, 0); err == nil {
+ if term.IsTerminal(int(t.Fd())) {
+ interactive = true
+ tty = t
+ } else {
+ t.Close()
+ }
+ }
+ }
+ if interactive {
+ var (
+ origIn = os.Stdin
+ origOut = os.Stdout
+ )
+ if tty != nil {
+ os.Stdin = tty
+ os.Stdout = tty
+ }
+ defer func() {
+ if tty != nil {
+ tty.Close()
+ }
+ os.Stdin = origIn
+ os.Stdout = origOut
+ }()
+ // Pass context.Background() - RunLogUIV2 handles Ctrl+C via raw terminal input
+ return ui.RunLogUIV2(context.Background(), ui.LogUIOptions{
+ LogPath: lp,
+ BgKey: 'b',
+ ShowFooter: true,
+ NoColor: flagNoColor,
+ })
+ }
+ if tty != nil {
+ tty.Close()
+ }
+
+ getPrinter().Info(fmt.Sprintf("Tailing %s (Ctrl+C to stop)", lp))
+ stop := make(chan struct{})
+ sigs := make(chan os.Signal, 1)
+ signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+ go func() { <-sigs; close(stop) }()
+ if err := process.TailFollow(lp, os.Stdout, stop); err != nil {
+ fmt.Printf("tail error: %v\n", err)
+ return err
+ }
+ // Print exit message after Ctrl+C
+ fmt.Println()
+ getPrinter().Success("Stopped tailing logs")
+ return nil
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_peers.go b/push-validator-manager/cmd/push-validator/cmd_peers.go
new file mode 100644
index 00000000..afc0e342
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_peers.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+)
+
+// init registers the `peers` subcommand, which queries the local RPC
+// endpoint for the currently connected peers and renders them as a table.
+func init() {
+	cmd := &cobra.Command{
+		Use:   "peers",
+		Short: "List connected peers (from local RPC)",
+		RunE: func(_ *cobra.Command, _ []string) error {
+			conf := loadCfg()
+			endpoint := conf.RPCLocal
+			if endpoint == "" {
+				endpoint = "http://127.0.0.1:26657"
+			}
+			client := node.New(endpoint)
+			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+			defer cancel()
+			peerList, err := client.Peers(ctx)
+			if err != nil {
+				getPrinter().Error(fmt.Sprintf("peers error: %v", err))
+				return err
+			}
+			colors := ui.NewColorConfig()
+			rows := make([][]string, len(peerList))
+			for i, peer := range peerList {
+				rows[i] = []string{peer.ID, peer.Addr}
+			}
+			fmt.Println(colors.Header(" Connected Peers "))
+			// Column widths: 40 chars for the full peer ID, 0 = auto for ADDR.
+			fmt.Print(ui.Table(colors, []string{"ID", "ADDR"}, rows, []int{40, 0}))
+			fmt.Printf("Total Peers: %d\n", len(peerList))
+			return nil
+		},
+	}
+	rootCmd.AddCommand(cmd)
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_register.go b/push-validator-manager/cmd/push-validator/cmd_register.go
new file mode 100644
index 00000000..6e843fdb
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_register.go
@@ -0,0 +1,516 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "math/big"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+ "golang.org/x/term"
+)
+
+var flagRegisterCheckOnly bool
+
+// handleRegisterValidator is a compatibility wrapper that pulls
+// defaults from env and invokes runRegisterValidator.
+// It prompts interactively for moniker and key name if not set via env vars.
+// handleRegisterValidator is a compatibility wrapper that pulls
+// defaults from env and invokes runRegisterValidator.
+// It prompts interactively for moniker and key name if not set via env vars.
+//
+// High-level flow:
+//  1. Query the chain for this node's registration status; exit early when
+//     already registered or when --check-only was requested.
+//  2. Warn — but do not abort — if another validator already uses the moniker.
+//  3. In text mode, interactively prompt for moniker, key name and commission
+//     rate; each prompt is skipped when the matching env var
+//     (MONIKER / KEY_NAME / COMMISSION_RATE) is set.
+//  4. Delegate the actual registration to runRegisterValidator.
+func handleRegisterValidator(cfg config.Config) {
+	// Get defaults from env or use hardcoded fallbacks
+	defaultMoniker := getenvDefault("MONIKER", "push-validator")
+	defaultKeyName := getenvDefault("KEY_NAME", "validator-key")
+	defaultAmount := getenvDefault("STAKE_AMOUNT", "1500000000000000000") // wei; used only in the non-interactive path below
+
+	moniker := defaultMoniker
+	keyName := defaultKeyName
+
+	v := validator.NewWith(validator.Options{
+		BinPath:       findPchaind(),
+		HomeDir:       cfg.HomeDir,
+		ChainID:       cfg.ChainID,
+		Keyring:       cfg.KeyringBackend,
+		GenesisDomain: cfg.GenesisDomain,
+		Denom:         cfg.Denom,
+	})
+
+	// Ask the chain whether this node is already a registered validator.
+	statusCtx, statusCancel := context.WithTimeout(context.Background(), 20*time.Second)
+	isValAlready, statusErr := v.IsValidator(statusCtx, "")
+	statusCancel()
+	if statusErr != nil {
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{"ok": false, "error": statusErr.Error()})
+		} else {
+			p := ui.NewPrinter(flagOutput)
+			fmt.Println()
+			fmt.Println(p.Colors.Error("โ ๏ธ Failed to verify validator status"))
+			fmt.Printf("Error: %v\n\n", statusErr)
+			fmt.Println("Please check your network connection and genesis domain configuration.")
+		}
+		return
+	}
+	// --check-only: report registration state and exit with no side effects.
+	if flagRegisterCheckOnly {
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{"ok": true, "registered": isValAlready})
+		} else {
+			p := ui.NewPrinter(flagOutput)
+			fmt.Println()
+			if isValAlready {
+				fmt.Println(p.Colors.Success("โ This node is already registered as a validator"))
+			} else {
+				fmt.Println(p.Colors.Info("Validator registration required"))
+			}
+		}
+		return
+	}
+	// Already registered: print guidance and exit (JSON mode treats this
+	// as an error so automation can detect the no-op).
+	if isValAlready {
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{"ok": false, "error": "validator already registered"})
+		} else {
+			p := ui.NewPrinter(flagOutput)
+			fmt.Println()
+			fmt.Println(p.Colors.Success("โ This node is already registered as a validator"))
+			fmt.Println()
+			fmt.Println("Your validator is active on the network.")
+			fmt.Println()
+			p.Section("Validator Status")
+			fmt.Println()
+			fmt.Println(p.Colors.Info("  Check your validator:"))
+			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator validators"))
+			fmt.Println()
+			fmt.Println(p.Colors.Info("  Monitor node status:"))
+			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator status"))
+			fmt.Println()
+		}
+		return
+	}
+
+	// Check for moniker conflicts before prompting for registration
+	monikerCheckCtx, monikerCheckCancel := context.WithTimeout(context.Background(), 10*time.Second)
+	myValInfo, monikerErr := validator.GetCachedMyValidator(monikerCheckCtx, cfg)
+	monikerCheckCancel()
+	// A lookup failure here is deliberately non-fatal: the conflict check is
+	// advisory and registration can still proceed.
+	if monikerErr == nil && myValInfo.ValidatorExistsWithSameMoniker {
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{
+				"ok":                  false,
+				"error":               "moniker conflict",
+				"conflicting_moniker": myValInfo.ConflictingMoniker,
+				"message":             fmt.Sprintf("A different validator is already using moniker '%s'. Choose a different moniker to register.", myValInfo.ConflictingMoniker),
+			})
+		} else {
+			p := ui.NewPrinter(flagOutput)
+			fmt.Println()
+			fmt.Println(p.Colors.Warning("โ ๏ธ Moniker Conflict Detected"))
+			fmt.Println()
+			fmt.Printf("A different validator is already using the moniker '%s'.\n", p.Colors.Apply(p.Colors.Theme.Value, myValInfo.ConflictingMoniker))
+			fmt.Println()
+			fmt.Println(p.Colors.Info("Please choose a different moniker when registering your validator."))
+			fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "Each validator must have a unique identifier on the network."))
+			fmt.Println()
+		}
+		// Don't return - allow registration with a different moniker
+	}
+
+	// Interactive prompts (skip in JSON mode or if env vars are explicitly set)
+	if flagOutput != "json" {
+		// If stdin is redirected (curl | bash installs), try to read prompts
+		// from the controlling terminal instead.
+		savedStdin := os.Stdin
+		var tty *os.File
+		if !flagNonInteractive && !term.IsTerminal(int(savedStdin.Fd())) {
+			if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+				tty = t
+				os.Stdin = t
+			}
+		}
+		// Restore os.Stdin at function exit (after runRegisterValidator runs).
+		if tty != nil {
+			defer func() {
+				os.Stdin = savedStdin
+				tty.Close()
+			}()
+		}
+
+		reader := bufio.NewReader(os.Stdin)
+
+		// Moniker prompt: empty input keeps the default.
+		if os.Getenv("MONIKER") == "" {
+			fmt.Printf("Enter validator name (moniker) [%s]: ", defaultMoniker)
+			input, _ := reader.ReadString('\n')
+			input = strings.TrimSpace(input)
+			if input != "" {
+				moniker = input
+			}
+			fmt.Println()
+		}
+
+		// Key name prompt, with a collision check against the keyring.
+		if os.Getenv("KEY_NAME") == "" {
+			fmt.Printf("Enter key name for validator (default: %s): ", defaultKeyName)
+			input, _ := reader.ReadString('\n')
+			input = strings.TrimSpace(input)
+			if input != "" {
+				keyName = input
+			}
+
+			// Check if key already exists
+			if keyExists(cfg, keyName) {
+				p := ui.NewPrinter(flagOutput)
+				fmt.Println()
+				fmt.Println(p.Colors.Warning(fmt.Sprintf("โ Key '%s' already exists.", keyName)))
+				fmt.Println()
+				fmt.Println(p.Colors.Info("You can use this existing key or create a new one."))
+				fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "Note: Recovery mnemonics are only shown when creating new keys."))
+				fmt.Printf("\nEnter a different key name (or press ENTER to use existing key): ")
+				newName, _ := reader.ReadString('\n')
+				newName = strings.TrimSpace(newName)
+				if newName != "" {
+					keyName = newName
+				} else {
+					// User chose to reuse existing key
+					fmt.Println()
+					fmt.Println(p.Colors.Success("โ Proceeding with existing key"))
+					fmt.Println()
+				}
+			}
+			fmt.Println()
+		}
+
+		// Commission rate prompt (only if not already registered)
+		var commissionRate string
+		if os.Getenv("COMMISSION_RATE") == "" {
+			p := ui.NewPrinter(flagOutput)
+			fmt.Printf("Enter commission rate (1-100%%) [10]: ")
+			input, _ := reader.ReadString('\n')
+			input = strings.TrimSpace(input)
+
+			if input == "" {
+				commissionRate = "0.10" // Default 10%
+			} else {
+				// Parse and validate; out-of-range or non-numeric input falls
+				// back to the default rather than re-prompting.
+				rate, err := strconv.ParseFloat(input, 64)
+				if err != nil || rate < 1 || rate > 100 {
+					fmt.Println(p.Colors.Error("โ Invalid commission rate. Using default 10%"))
+					commissionRate = "0.10"
+				} else {
+					// Convert percentage to decimal (e.g. 15 -> 0.15)
+					commissionRate = fmt.Sprintf("%.2f", rate/100)
+				}
+			}
+			fmt.Println()
+		} else {
+			commissionRate = getenvDefault("COMMISSION_RATE", "0.10")
+		}
+
+		// Interactive mode - let user choose stake amount
+		// Pass empty string to trigger the interactive stake selection prompt
+		runRegisterValidator(cfg, moniker, keyName, "", commissionRate)
+	} else {
+		// JSON mode or env vars set - use default/env amount
+		commissionRate := getenvDefault("COMMISSION_RATE", "0.10")
+		runRegisterValidator(cfg, moniker, keyName, defaultAmount, commissionRate)
+	}
+}
+
+// keyExists checks if a key with the given name already exists in the keyring
+func keyExists(cfg config.Config, keyName string) bool {
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ defer cancel()
+ cmd := exec.CommandContext(ctx, findPchaind(), "keys", "show", keyName, "-a",
+ "--keyring-backend", cfg.KeyringBackend, "--home", cfg.HomeDir)
+ err := cmd.Run()
+ return err == nil
+}
+
+// runRegisterValidator performs the end-to-end registration flow:
+// - verify node is not catching up
+// - ensure key exists
+// - wait for funding if necessary
+// - submit create-validator transaction
+// It prints text or JSON depending on --output.
+// runRegisterValidator performs the end-to-end registration flow:
+// - verify node is not catching up
+// - ensure key exists
+// - wait for funding if necessary
+// - submit create-validator transaction
+// It prints text or JSON depending on --output.
+//
+// amount is the stake in wei; an empty amount triggers the interactive
+// stake-selection prompt in text mode (and falls back to the minimum stake
+// in JSON mode). commissionRate is a decimal string such as "0.10".
+func runRegisterValidator(cfg config.Config, moniker, keyName, amount, commissionRate string) {
+	// Reattach prompts to the controlling terminal when stdin is redirected.
+	savedStdin := os.Stdin
+	var tty *os.File
+	if !flagNonInteractive && !term.IsTerminal(int(savedStdin.Fd())) {
+		if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+			tty = t
+			os.Stdin = t
+		}
+	}
+	if tty != nil {
+		defer func() {
+			os.Stdin = savedStdin
+			tty.Close()
+		}()
+	}
+
+	local := strings.TrimRight(cfg.RPCLocal, "/")
+	if local == "" {
+		local = "http://127.0.0.1:26657"
+	}
+	remoteHTTP := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
+	cliLocal := node.New(local)
+	cliRemote := node.New(remoteHTTP)
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+	stLocal, err1 := cliLocal.Status(ctx)
+	_, err2 := cliRemote.RemoteStatus(ctx, remoteHTTP)
+	// The sync gate only fires when BOTH status calls succeed; if either RPC
+	// is unreachable we proceed and let the transaction itself fail.
+	if err1 == nil && err2 == nil {
+		if stLocal.CatchingUp {
+			if flagOutput == "json" {
+				getPrinter().JSON(map[string]any{"ok": false, "error": "node is still syncing"})
+			} else {
+				fmt.Println("node is still syncing. Run 'push-validator sync' first")
+			}
+			return
+		}
+	}
+	v := validator.NewWith(validator.Options{BinPath: findPchaind(), HomeDir: cfg.HomeDir, ChainID: cfg.ChainID, Keyring: cfg.KeyringBackend, GenesisDomain: cfg.GenesisDomain, Denom: cfg.Denom})
+	ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel2()
+	// EnsureKey creates the key when missing; keyInfo.Mnemonic is non-empty
+	// only for freshly created keys.
+	keyInfo, err := v.EnsureKey(ctx2, keyName)
+	if err != nil {
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()})
+		} else {
+			fmt.Printf("key error: %v\n", err)
+		}
+		return
+	}
+
+	// EVM address display is best-effort; failures blank it out.
+	evmAddr, err := v.GetEVMAddress(ctx2, keyInfo.Address)
+	if err != nil {
+		evmAddr = ""
+	}
+
+	p := ui.NewPrinter(flagOutput)
+
+	if flagOutput != "json" {
+		// Display mnemonic if this is a new key
+		if keyInfo.Mnemonic != "" {
+			// Display mnemonic in prominent box
+			p.MnemonicBox(keyInfo.Mnemonic)
+			fmt.Println()
+
+			// Warning message in yellow
+			fmt.Println(p.Colors.Warning("**Important** Write this mnemonic phrase in a safe place."))
+			fmt.Println(p.Colors.Warning("It is the only way to recover your account if you ever forget your password."))
+			fmt.Println()
+		} else {
+			// Existing key - show clear status with reminder
+			fmt.Println(p.Colors.Success(fmt.Sprintf("โ Using existing key: %s", keyInfo.Name)))
+			fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "  (Recovery mnemonic was displayed when this key was first created)"))
+			fmt.Println()
+		}
+
+		// Always display Account Info section (whether new or existing key)
+		p.Section("Account Info")
+		p.KeyValueLine("EVM Address", evmAddr, "blue")
+		p.KeyValueLine("Cosmos Address", keyInfo.Address, "dim")
+		fmt.Println()
+	}
+	// Funding thresholds, all in wei (1 PC = 1e18 wei).
+	const requiredBalance = "1600000000000000000"
+	const minStake = "1500000000000000000" // 1.5 PC in wei
+	const feeReserve = "100000000000000000" // 0.1 PC in wei for gas fees
+	maxRetries := 10
+	var finalBalance string
+
+	// Funding loop: only RPC failures consume a retry; an insufficient
+	// balance waits for the user to press ENTER and loops again.
+	for tries := 0; tries < maxRetries; {
+		balCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+		bal, err := v.Balance(balCtx, keyInfo.Address)
+		cancel()
+		if err != nil {
+			fmt.Printf("โ ๏ธ Balance check failed: %v\n", err)
+			tries++
+			time.Sleep(2 * time.Second)
+			continue
+		}
+		balInt := new(big.Int)
+		balInt.SetString(bal, 10)
+		reqInt := new(big.Int)
+		reqInt.SetString(requiredBalance, 10)
+		if balInt.Cmp(reqInt) >= 0 {
+			fmt.Println(p.Colors.Success("โ
Sufficient balance"))
+			finalBalance = bal
+			break
+		}
+		pcAmount := "0.000000"
+		if bal != "0" {
+			balFloat, _ := new(big.Float).SetString(bal)
+			divisor := new(big.Float).SetFloat64(1e18)
+			result := new(big.Float).Quo(balFloat, divisor)
+			pcAmount = fmt.Sprintf("%.6f", result)
+		}
+
+		// Display funding information with breakdown
+		p.KeyValueLine("Current Balance", pcAmount+" PC", "yellow")
+		p.KeyValueLine("Min Stake Required", "1.5 PC", "yellow")
+		p.KeyValueLine("Gas Reserve", "0.1 PC", "yellow")
+		p.KeyValueLine("Total Required", "1.6 PC", "yellow")
+		fmt.Println()
+		fmt.Printf("Please send at least %s to the EVM address shown above.\n", p.Colors.Warning("1.6 PC"))
+		fmt.Printf("(Minimum 1.5 PC for staking + 0.1 PC for transaction fees)\n")
+		fmt.Printf("You can stake more than 1.5 PC if desired.\n\n")
+		fmt.Printf("Use faucet at %s for testnet validators\n", p.Colors.Info("https://faucet.push.org"))
+		fmt.Printf("or contact us at %s\n\n", p.Colors.Info("push.org/support"))
+
+		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "Press ENTER after funding..."))
+		reader := bufio.NewReader(os.Stdin)
+		_, _ = reader.ReadString('\n')
+	}
+	// NOTE(review): if all retries fail, finalBalance stays "" and the
+	// SetString below leaves balInt at 0, making maxStakeable negative —
+	// confirm whether the loop should abort instead of falling through.
+
+	// Interactive stake amount selection
+	stake := amount
+	if stake == "" && flagOutput != "json" {
+		// Calculate max stakeable amount (balance - fee reserve)
+		balInt := new(big.Int)
+		balInt.SetString(finalBalance, 10)
+		feeInt := new(big.Int)
+		feeInt.SetString(feeReserve, 10)
+		maxStakeable := new(big.Int).Sub(balInt, feeInt)
+
+		minStakeInt := new(big.Int)
+		minStakeInt.SetString(minStake, 10)
+
+		// Display balance and staking range
+		fmt.Println()
+		balFloat, _ := new(big.Float).SetString(finalBalance)
+		divisor := new(big.Float).SetFloat64(1e18)
+		balPC := new(big.Float).Quo(balFloat, divisor)
+
+		maxStakeFloat, _ := new(big.Float).SetString(maxStakeable.String())
+		maxPC := new(big.Float).Quo(maxStakeFloat, divisor)
+
+		p.KeyValueLine("Current Balance", fmt.Sprintf("%.6f", balPC)+" PC", "blue")
+		p.KeyValueLine("Available to Stake", fmt.Sprintf("%.6f", maxPC)+" PC", "blue")
+		p.KeyValueLine("Reserved for Fees", "0.1 PC", "dim")
+		fmt.Println()
+
+		// Prompt for stake amount with validation loop
+		reader := bufio.NewReader(os.Stdin)
+		for {
+			minStakePC := 1.5
+			maxStakePC, _ := strconv.ParseFloat(fmt.Sprintf("%.6f", maxPC), 64)
+
+			fmt.Printf("Enter stake amount (%.1f - %.1f PC) [%.1f]: ", minStakePC, maxStakePC, maxStakePC)
+			input, _ := reader.ReadString('\n')
+			input = strings.TrimSpace(input)
+
+			// Default to maximum stakeable amount if empty
+			if input == "" {
+				stake = maxStakeable.String()
+				// NOTE(review): a colored runtime string is used as the Printf
+				// format; safe only while color codes contain no '%' verbs.
+				fmt.Printf(p.Colors.Success("โ Will stake %.6f PC\n"), maxStakePC)
+				fmt.Println()
+				break
+			}
+
+			// Parse user input
+			stakeAmount, err := strconv.ParseFloat(input, 64)
+			if err != nil {
+				fmt.Println(p.Colors.Error("โ Invalid amount. Enter a number. Try again."))
+				continue
+			}
+
+			// Validate bounds
+			if stakeAmount < minStakePC {
+				fmt.Printf(p.Colors.Error("โ Amount too low. Minimum stake is %.1f PC. Try again.\n"), minStakePC)
+				continue
+			}
+			if stakeAmount > maxStakePC {
+				fmt.Printf(p.Colors.Error("โ Insufficient balance. Maximum: %.1f PC. Try again.\n"), maxStakePC)
+				continue
+			}
+
+			// Convert to wei
+			stakeWei := new(big.Float).Mul(new(big.Float).SetFloat64(stakeAmount), new(big.Float).SetFloat64(1e18))
+			stake = stakeWei.Text('f', 0)
+
+			fmt.Printf(p.Colors.Success("โ Will stake %.6f PC\n"), stakeAmount)
+			fmt.Println()
+			break
+		}
+	} else if stake == "" {
+		// JSON mode with no explicit amount: stake the minimum.
+		stake = minStake
+	}
+	// Create fresh context for registration transaction (independent of earlier operations)
+	regCtx, regCancel := context.WithTimeout(context.Background(), 90*time.Second)
+	defer regCancel()
+	txHash, err := v.Register(regCtx, validator.RegisterArgs{Moniker: moniker, Amount: stake, KeyName: keyName, CommissionRate: commissionRate, MinSelfDelegation: "1"})
+	if err != nil {
+		errMsg := err.Error()
+		if flagOutput == "json" {
+			getPrinter().JSON(map[string]any{"ok": false, "error": errMsg})
+		} else {
+			// Check if this is a "validator already exists" error
+			if strings.Contains(errMsg, "validator already exist") {
+				p := getPrinter()
+				fmt.Println()
+				fmt.Println(p.Colors.Error("โ Validator registration failed: Validator pubkey already exists"))
+				fmt.Println()
+				fmt.Println("This validator consensus key is already registered on the network.")
+				fmt.Println()
+				p.Section("Resolution Options")
+				fmt.Println()
+				fmt.Println(p.Colors.Info("  1. Check existing validators:"))
+				fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator validators"))
+				fmt.Println()
+				fmt.Println(p.Colors.Info("  2. To register a new validator, reset node data:"))
+				fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator reset"))
+				fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "  (This will generate new validator keys)"))
+				fmt.Println()
+				fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "  Note: Resetting will create a new validator identity."))
+				fmt.Println()
+			} else {
+				fmt.Printf("register error: %v\n", err)
+			}
+		}
+		return
+	}
+
+	// Success output
+	if flagOutput == "json" {
+		getPrinter().JSON(map[string]any{"ok": true, "txhash": txHash, "moniker": moniker, "key_name": keyName, "commission_rate": commissionRate, "stake_amount": stake})
+	} else {
+		fmt.Println()
+		p := getPrinter()
+		p.Success("โ
Validator registration successful!")
+		fmt.Println()
+
+		// Display registration details
+		p.KeyValueLine("Transaction Hash", txHash, "green")
+		p.KeyValueLine("Validator Name", moniker, "blue")
+
+		// Convert stake amount from wei to PC for display
+		stakeFloat, _ := new(big.Float).SetString(stake)
+		divisor := new(big.Float).SetFloat64(1e18)
+		stakePC := new(big.Float).Quo(stakeFloat, divisor)
+		p.KeyValueLine("Staked Amount", fmt.Sprintf("%.6f", stakePC)+" PC", "yellow")
+
+		// Convert commission rate back to percentage for display
+		commRate, _ := strconv.ParseFloat(commissionRate, 64)
+		p.KeyValueLine("Commission Rate", fmt.Sprintf("%.0f%%", commRate*100), "dim")
+		fmt.Println()
+
+		// Show helpful next steps
+		fmt.Println(p.Colors.SubHeader("Next Steps"))
+		fmt.Println(p.Colors.Separator(40))
+		fmt.Println()
+		fmt.Println(p.Colors.Info("  1. Check validator status:"))
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator validators"))
+		fmt.Println()
+		fmt.Println(p.Colors.Info("  2. Live dashboard:"))
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator dashboard"))
+		fmt.Println()
+		fmt.Println(p.Colors.Info("  3. Monitor node status:"))
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator status"))
+		fmt.Println()
+		fmt.Println(p.Colors.Info("  4. View node logs:"))
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator logs"))
+		fmt.Println()
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "  Your validator will appear in the active set after the next epoch."))
+		fmt.Println()
+	}
+	return
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_reset.go b/push-validator-manager/cmd/push-validator/cmd_reset.go
new file mode 100644
index 00000000..e6a06172
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_reset.go
@@ -0,0 +1,183 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "golang.org/x/term"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/admin"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+)
+
+// handleReset stops the node (best-effort), clears chain data while
+// preserving the address book, and restarts the node. It emits JSON or text depending on --output.
+func handleReset(cfg config.Config, sup process.Supervisor) error {
+ wasRunning := sup.IsRunning()
+
+ // Stop node first and verify it stopped
+ if wasRunning {
+ p := getPrinter()
+ if flagOutput != "json" {
+ fmt.Println(p.Colors.Info("Stopping node..."))
+ }
+ if err := sup.Stop(); err != nil {
+ if flagOutput == "json" {
+ p.JSON(map[string]any{"ok": false, "error": fmt.Sprintf("failed to stop node: %v", err)})
+ } else {
+ p.Warn(fmt.Sprintf("โ Could not stop node gracefully: %v", err))
+ p.Info("Proceeding with reset (node may need manual cleanup)")
+ }
+ } else if flagOutput != "json" {
+ p.Success("โ Node stopped")
+ }
+ }
+
+ showSpinner := flagOutput != "json" && term.IsTerminal(int(os.Stdout.Fd()))
+ var (
+ spinnerStop chan struct{}
+ spinnerTicker *time.Ticker
+ )
+ if showSpinner {
+ c := ui.NewColorConfig()
+ prefix := c.Info("Resetting chain data")
+ sp := ui.NewSpinner(os.Stdout, prefix)
+ spinnerStop = make(chan struct{})
+ spinnerTicker = time.NewTicker(120 * time.Millisecond)
+ go func() {
+ for {
+ select {
+ case <-spinnerStop:
+ return
+ case <-spinnerTicker.C:
+ sp.Tick()
+ }
+ }
+ }()
+ }
+
+ err := admin.Reset(admin.ResetOptions{
+ HomeDir: cfg.HomeDir,
+ BinPath: findPchaind(),
+ KeepAddrBook: true,
+ })
+
+ if showSpinner {
+ spinnerTicker.Stop()
+ close(spinnerStop)
+ fmt.Fprint(os.Stdout, "\r\033[K")
+ }
+
+ if err != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()})
+ } else {
+ getPrinter().Error(fmt.Sprintf("reset error: %v", err))
+ }
+ return err
+ }
+
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": true, "action": "reset"})
+ } else {
+ p := getPrinter()
+ p.Success("โ Chain data reset (addr book kept)")
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Next steps:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator start"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (will resume node from genesis with existing peers)\n"))
+ }
+
+ return nil
+}
+
+// handleFullReset performs a complete reset, deleting ALL data including validator keys.
+// Requires explicit confirmation unless --yes flag is used.
+func handleFullReset(cfg config.Config, sup process.Supervisor) error {
+ p := ui.NewPrinter(flagOutput)
+
+ // Stop node first and verify it stopped
+ if sup.IsRunning() {
+ if flagOutput != "json" {
+ fmt.Println(p.Colors.Info("Stopping node..."))
+ }
+ if err := sup.Stop(); err != nil {
+ if flagOutput == "json" {
+ p.JSON(map[string]any{"ok": false, "error": fmt.Sprintf("failed to stop node: %v", err)})
+ return err
+ } else {
+ p.Warn(fmt.Sprintf("โ Could not stop node: %v", err))
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Warning, "Continue with full reset anyway? (y/N): "))
+ reader := bufio.NewReader(os.Stdin)
+ response, _ := reader.ReadString('\n')
+ if strings.ToLower(strings.TrimSpace(response)) != "y" {
+ p.Info("Full reset cancelled")
+ return nil
+ }
+ }
+ } else if flagOutput != "json" {
+ p.Success("โ Node stopped")
+ }
+ }
+
+ if flagOutput != "json" {
+ fmt.Println()
+ fmt.Println(p.Colors.Warning("โ ๏ธ FULL RESET - This will delete EVERYTHING"))
+ fmt.Println()
+ fmt.Println("This operation will permanently delete:")
+ fmt.Println(p.Colors.Error(" โข All blockchain data"))
+ fmt.Println(p.Colors.Error(" โข Validator consensus keys (priv_validator_key.json)"))
+ fmt.Println(p.Colors.Error(" โข All keyring accounts and keys"))
+ fmt.Println(p.Colors.Error(" โข Node identity (node_key.json)"))
+ fmt.Println(p.Colors.Error(" โข Address book and peer connections"))
+ fmt.Println()
+ fmt.Println(p.Colors.Warning("This will create a NEW validator identity - you cannot recover the old one!"))
+ fmt.Println()
+
+ // Require explicit confirmation
+ if !flagYes {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "Type 'yes' to confirm full reset: "))
+ reader := bufio.NewReader(os.Stdin)
+ response, _ := reader.ReadString('\n')
+ response = strings.TrimSpace(strings.ToLower(response))
+
+ if response != "yes" {
+ fmt.Println(p.Colors.Info("Full reset cancelled"))
+ return nil
+ }
+ }
+ }
+
+ // Perform full reset
+ err := admin.FullReset(admin.FullResetOptions{
+ HomeDir: cfg.HomeDir,
+ BinPath: findPchaind(),
+ })
+
+ if err != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()})
+ } else {
+ getPrinter().Error(fmt.Sprintf("full reset error: %v", err))
+ }
+ return err
+ }
+
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": true, "action": "full-reset"})
+ } else {
+ p := getPrinter()
+ p.Success("โ Full reset complete")
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Next steps:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator start"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (will auto-initialize with new validator keys)"))
+ }
+
+ return nil
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_restake.go b/push-validator-manager/cmd/push-validator/cmd_restake.go
new file mode 100644
index 00000000..80d2ceef
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_restake.go
@@ -0,0 +1,438 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "math/big"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+ "golang.org/x/term"
+)
+
+// handleRestakeAll orchestrates the restake-all flow:
+// - verify node is synced
+// - verify validator is registered
+// - display current rewards
+// - automatically withdraw all rewards (commission + outstanding)
+// - ask for confirmation to restake with edit/cancel options
+// - submit delegation transaction
+// - display results
+//
+// Output honors the global --output flag: "json" produces machine-readable
+// results and suppresses prompts and decorations; any other value produces
+// styled human-readable output. Failures abort the flow with an explanatory
+// message instead of returning an error value.
+func handleRestakeAll(cfg config.Config) {
+ p := ui.NewPrinter(flagOutput)
+
+ if flagOutput != "json" {
+ fmt.Println()
+ p.Header("Push Validator Manager - Restake All Rewards")
+ fmt.Println()
+ }
+
+ // Step 1: Check sync status
+ if flagOutput != "json" {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking node sync status..."))
+ }
+
+ // Fall back to the default local RPC endpoint when none is configured.
+ local := strings.TrimRight(cfg.RPCLocal, "/")
+ if local == "" {
+ local = "http://127.0.0.1:26657"
+ }
+ // The remote reference RPC endpoint is derived from the genesis domain (HTTPS :443).
+ remoteHTTP := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
+ cliLocal := node.New(local)
+ cliRemote := node.New(remoteHTTP)
+
+ // Probe both endpoints under a single 3s deadline; either failure aborts.
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ stLocal, err1 := cliLocal.Status(ctx)
+ _, err2 := cliRemote.RemoteStatus(ctx, remoteHTTP)
+ cancel()
+
+ if err1 != nil || err2 != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check sync status"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("โ Failed to check sync status"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Please verify your node is running and properly configured."))
+ fmt.Println()
+ }
+ return
+ }
+
+ if stLocal.CatchingUp {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "node is still syncing"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Warning("โ ๏ธ Node is still syncing to latest block"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Please wait for sync to complete before restaking."))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator sync"))
+ fmt.Println()
+ }
+ return
+ }
+
+ if flagOutput != "json" {
+ fmt.Println(" " + p.Colors.Success("โ"))
+ }
+
+ // Step 2: Check validator registration
+ if flagOutput != "json" {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking validator status..."))
+ }
+
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+ myVal, statusErr := validator.GetCachedMyValidator(ctx2, cfg)
+ cancel2()
+
+ if statusErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check validator status"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("โ Failed to check validator status"))
+ fmt.Println()
+ }
+ return
+ }
+
+ if !myVal.IsValidator {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "node is not registered as validator"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Warning("โ ๏ธ This node is not registered as a validator"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Register first using:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator register"))
+ fmt.Println()
+ }
+ return
+ }
+
+ if flagOutput != "json" {
+ fmt.Println(" " + p.Colors.Success("โ"))
+ }
+
+ // Step 3: Fetch current rewards
+ if flagOutput != "json" {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ฐ Fetching current rewards..."))
+ }
+
+ ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second)
+ commission, outstanding, rewardsErr := validator.GetValidatorRewards(ctx3, cfg, myVal.Address)
+ cancel3()
+
+ if flagOutput != "json" {
+ fmt.Println(" " + p.Colors.Success("โ"))
+ }
+
+ if rewardsErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "failed to fetch rewards"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("โ Failed to fetch rewards"))
+ fmt.Println()
+ fmt.Printf("Error: %v\n", rewardsErr)
+ fmt.Println()
+ }
+ return
+ }
+
+ // Display rewards summary
+ if flagOutput != "json" {
+ fmt.Println()
+ p.Section("Current Rewards")
+ p.KeyValueLine("Commission Rewards", commission+" PC", "green")
+ p.KeyValueLine("Outstanding Rewards", outstanding+" PC", "green")
+ fmt.Println()
+ }
+
+ // Parse rewards to check if any are available
+ // Unparsable amounts are deliberately treated as 0 (parse errors discarded).
+ commissionFloat, _ := strconv.ParseFloat(strings.TrimSpace(commission), 64)
+ outstandingFloat, _ := strconv.ParseFloat(strings.TrimSpace(outstanding), 64)
+ totalRewards := commissionFloat + outstandingFloat
+ const rewardThreshold = 0.01 // Minimum 0.01 PC to be worthwhile
+
+ if totalRewards < rewardThreshold {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": "no significant rewards available"})
+ } else {
+ fmt.Println(p.Colors.Warning("โ ๏ธ No significant rewards available (less than 0.01 PC)"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Nothing to restake. Continue earning rewards and try again later."))
+ fmt.Println()
+ }
+ return
+ }
+
+ // Step 4: Auto-detect key name from validator
+ // Prefer the keyring entry whose address matches the validator's operator
+ // account; otherwise fall back to $KEY_NAME or "validator-key".
+ defaultKeyName := getenvDefault("KEY_NAME", "validator-key")
+ var keyName string
+
+ if myVal.Address != "" {
+ accountAddr, convErr := convertValidatorToAccountAddress(myVal.Address)
+ if convErr == nil {
+ if foundKey, findErr := findKeyNameByAddress(cfg, accountAddr); findErr == nil {
+ keyName = foundKey
+ if flagOutput != "json" {
+ fmt.Printf("๐ Using key: %s\n", keyName)
+ fmt.Println()
+ }
+ } else {
+ keyName = defaultKeyName
+ }
+ } else {
+ keyName = defaultKeyName
+ }
+ } else {
+ keyName = defaultKeyName
+ }
+
+ // Step 5: Submit withdraw rewards transaction (always include commission for restaking)
+ if flagOutput != "json" {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ธ Withdrawing all rewards..."))
+ }
+
+ v := validator.NewWith(validator.Options{
+ BinPath: findPchaind(),
+ HomeDir: cfg.HomeDir,
+ ChainID: cfg.ChainID,
+ Keyring: cfg.KeyringBackend,
+ GenesisDomain: cfg.GenesisDomain,
+ Denom: cfg.Denom,
+ })
+
+ ctx5, cancel5 := context.WithTimeout(context.Background(), 90*time.Second)
+ txHash, withdrawErr := v.WithdrawRewards(ctx5, myVal.Address, keyName, true) // Always include commission
+ cancel5()
+
+ if withdrawErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{"ok": false, "error": withdrawErr.Error(), "step": "withdraw"})
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("โ Withdrawal transaction failed"))
+ fmt.Println()
+ fmt.Printf("Error: %v\n", withdrawErr)
+ fmt.Println()
+ }
+ return
+ }
+
+ if flagOutput != "json" {
+ fmt.Println(" " + p.Colors.Success("โ"))
+ fmt.Println()
+ p.KeyValueLine("Transaction Hash", txHash, "green")
+ fmt.Printf(p.Colors.Success("โ Successfully withdrew %.6f PC\n"), totalRewards)
+ fmt.Println()
+ }
+
+ // Step 6: Calculate available amount for restaking
+ const feeReserve = 0.15 // Reserve 0.15 PC for gas fees
+ maxRestakeable := totalRewards - feeReserve
+
+ if maxRestakeable <= 0 {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{
+ "ok": true,
+ "withdraw_txhash": txHash,
+ "withdrawn": fmt.Sprintf("%.6f", totalRewards),
+ "restaked": "0",
+ "message": "insufficient balance for restaking after gas reserve",
+ })
+ } else {
+ fmt.Println(p.Colors.Warning("โ ๏ธ Insufficient balance for restaking after gas reserve"))
+ fmt.Println()
+ fmt.Println("Funds have been withdrawn to your wallet but are too small to restake.")
+ fmt.Println()
+ }
+ return
+ }
+
+ // Step 7: Display restaking options
+ if flagOutput != "json" {
+ p.Section("Available for Restaking")
+ p.KeyValueLine("Withdrawn Amount", fmt.Sprintf("%.6f PC", totalRewards), "blue")
+ p.KeyValueLine("Gas Reserve", fmt.Sprintf("%.2f PC", feeReserve), "dim")
+ p.KeyValueLine("Available to Stake", fmt.Sprintf("%.6f PC", maxRestakeable), "blue")
+ fmt.Println()
+ }
+
+ // Step 8: Interactive confirmation with edit/cancel option
+ restakeAmount := maxRestakeable
+ restakeAmountWei := "" // set below once the final amount is confirmed
+
+ if !flagNonInteractive && !flagYes && flagOutput != "json" {
+ // If stdin is not a TTY (e.g. the binary was launched by a piped
+ // installer script), reattach prompts to /dev/tty so the user can
+ // still answer interactively.
+ savedStdin := os.Stdin
+ var tty *os.File
+ if !term.IsTerminal(int(savedStdin.Fd())) {
+ if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+ tty = t
+ os.Stdin = t
+ }
+ }
+ // Restore the original stdin and close the tty handle on return.
+ if tty != nil {
+ defer func() {
+ os.Stdin = savedStdin
+ tty.Close()
+ }()
+ }
+
+ reader := bufio.NewReader(os.Stdin)
+
+ for {
+ fmt.Printf("Restake %.6f PC? (y/n/edit) [y]: ", restakeAmount)
+ input, _ := reader.ReadString('\n')
+ input = strings.TrimSpace(strings.ToLower(input))
+
+ if input == "" || input == "y" || input == "yes" {
+ // Proceed with full amount
+ break
+ } else if input == "n" || input == "no" {
+ // Cancel restaking
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Restaking cancelled. Funds remain in your wallet."))
+ fmt.Println()
+ // NOTE(review): unreachable — this prompt loop only runs when
+ // flagOutput != "json" (see enclosing condition); consider removing.
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{
+ "ok": true,
+ "withdraw_txhash": txHash,
+ "withdrawn": fmt.Sprintf("%.6f", totalRewards),
+ "restaked": "0",
+ "cancelled": true,
+ })
+ }
+ return
+ } else if input == "edit" || input == "e" {
+ // Allow user to edit amount
+ fmt.Println()
+ for {
+ fmt.Printf("Enter amount to restake (0.01 - %.6f PC): ", maxRestakeable)
+ amountInput, _ := reader.ReadString('\n')
+ amountInput = strings.TrimSpace(amountInput)
+
+ if amountInput == "" {
+ fmt.Println(p.Colors.Error("โ Amount is required. Try again."))
+ continue
+ }
+
+ // Parse user input
+ customAmount, parseErr := strconv.ParseFloat(amountInput, 64)
+ if parseErr != nil {
+ fmt.Println(p.Colors.Error("โ Invalid amount. Enter a number. Try again."))
+ continue
+ }
+
+ // Validate bounds
+ if customAmount < 0.01 {
+ fmt.Println(p.Colors.Error("โ Amount too low. Minimum restake is 0.01 PC. Try again."))
+ continue
+ }
+ if customAmount > maxRestakeable {
+ fmt.Printf(p.Colors.Error("โ Insufficient balance. Maximum: %.6f PC. Try again.\n"), maxRestakeable)
+ continue
+ }
+
+ // Use custom amount
+ restakeAmount = customAmount
+ fmt.Printf(p.Colors.Success("โ Will restake %.6f PC\n"), restakeAmount)
+ fmt.Println()
+ break
+ }
+ // Leave the outer confirmation loop once a valid amount was entered.
+ break
+ } else {
+ // Treat any other input as cancel
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Invalid input. Restaking cancelled."))
+ fmt.Println()
+ return
+ }
+ }
+ }
+
+ // Convert to wei (1 PC = 1e18); big.Float avoids float64 rounding when
+ // producing the integer amount string.
+ restakeWei := new(big.Float).Mul(new(big.Float).SetFloat64(restakeAmount), new(big.Float).SetFloat64(1e18))
+ restakeAmountWei = restakeWei.Text('f', 0)
+
+ // Step 9: Submit delegation transaction
+ if flagOutput != "json" {
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ค Restaking funds..."))
+ }
+
+ ctx6, cancel6 := context.WithTimeout(context.Background(), 90*time.Second)
+ delegateTxHash, delegateErr := v.Delegate(ctx6, validator.DelegateArgs{
+ ValidatorAddress: myVal.Address,
+ Amount: restakeAmountWei,
+ KeyName: keyName,
+ })
+ cancel6()
+
+ if delegateErr != nil {
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{
+ "ok": false,
+ "withdraw_txhash": txHash,
+ "withdrawn": fmt.Sprintf("%.6f", totalRewards),
+ "restake_error": delegateErr.Error(),
+ "step": "restake",
+ })
+ } else {
+ fmt.Println()
+ fmt.Println(p.Colors.Error("โ Restaking transaction failed"))
+ fmt.Println()
+ fmt.Printf("Error: %v\n", delegateErr)
+ fmt.Println()
+ fmt.Println(p.Colors.Warning("Note: Rewards were successfully withdrawn. Funds are in your wallet."))
+ fmt.Println(p.Colors.Info("You can manually delegate using: push-validator increase-stake"))
+ fmt.Println()
+ }
+ return
+ }
+
+ if flagOutput != "json" {
+ fmt.Println(" " + p.Colors.Success("โ"))
+ }
+
+ // Success output
+ if flagOutput == "json" {
+ getPrinter().JSON(map[string]any{
+ "ok": true,
+ "withdraw_txhash": txHash,
+ "restake_txhash": delegateTxHash,
+ "withdrawn": fmt.Sprintf("%.6f", totalRewards),
+ "restaked": fmt.Sprintf("%.6f", restakeAmount),
+ })
+ } else {
+ fmt.Println()
+ p.Success("โ
 Successfully restaked rewards!")
+ fmt.Println()
+
+ // Display transaction details
+ p.KeyValueLine("Withdrawal TxHash", txHash, "green")
+ p.KeyValueLine("Restake TxHash", delegateTxHash, "green")
+ p.KeyValueLine("Amount Restaked", fmt.Sprintf("%.6f PC", restakeAmount), "yellow")
+ fmt.Println()
+
+ // Show helpful next steps
+ fmt.Println(p.Colors.SubHeader("Next Steps"))
+ fmt.Println(p.Colors.Separator(40))
+ fmt.Println()
+ fmt.Println(p.Colors.Info(" 1. Check your increased stake:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator status"))
+ fmt.Println()
+ fmt.Println(p.Colors.Info(" 2. Monitor validator performance:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator dashboard"))
+ fmt.Println()
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " Your validator power has been increased!"))
+ fmt.Println()
+ }
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_start.go b/push-validator-manager/cmd/push-validator/cmd_start.go
new file mode 100644
index 00000000..c28db7be
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_start.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+)
+
+// handleStop stops the node process via the supervisor and reports the
+// outcome in the format selected by the global --output flag.
+//
+// On failure the supervisor error is emitted (JSON object or styled text)
+// and returned unchanged so the caller can exit non-zero. On success a
+// confirmation and the suggested next command are printed.
+func handleStop(sup process.Supervisor) error {
+	p := getPrinter()
+	if err := sup.Stop(); err != nil {
+		// gofmt: the original collapsed this if/else onto one line.
+		if flagOutput == "json" {
+			p.JSON(map[string]any{"ok": false, "error": err.Error()})
+		} else {
+			p.Error(fmt.Sprintf("stop error: %v", err))
+		}
+		return err
+	}
+	if flagOutput == "json" {
+		p.JSON(map[string]any{"ok": true, "action": "stop"})
+		return nil
+	}
+	p.Success("โ Node stopped")
+	fmt.Println()
+	fmt.Println(p.Colors.Info("Next steps:"))
+	fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator start"))
+	fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (start the node)"))
+	return nil
+}
diff --git a/push-validator-manager/cmd/push-validator/cmd_status.go b/push-validator-manager/cmd/push-validator/cmd_status.go
new file mode 100644
index 00000000..e2bc1a4b
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_status.go
@@ -0,0 +1,725 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/charmbracelet/lipgloss"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/metrics"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
+// statusResult models the key process and RPC fields shown by the
+// `status` command. It is also used for JSON output when --output=json.
+//
+// Fields tagged with `omitempty` are dropped from the JSON document when
+// they hold their zero value, so the emitted object only contains data
+// that was actually collected.
+type statusResult struct {
+ // Process information
+ Running bool `json:"running"`
+ PID int `json:"pid,omitempty"`
+
+ // RPC connectivity
+ RPCListening bool `json:"rpc_listening"`
+ RPCURL string `json:"rpc_url,omitempty"`
+
+ // Sync status
+ CatchingUp bool `json:"catching_up"`
+ Height int64 `json:"height"`
+ RemoteHeight int64 `json:"remote_height,omitempty"`
+ SyncProgress float64 `json:"sync_progress,omitempty"` // Percentage (0-100), clamped to 100
+
+ // Validator status
+ IsValidator bool `json:"is_validator,omitempty"`
+
+ // Network information
+ Peers int `json:"peers,omitempty"`
+ PeerList []string `json:"peer_list,omitempty"` // Full peer IDs
+ LatencyMS int64 `json:"latency_ms,omitempty"`
+
+ // Node identity (when available)
+ NodeID string `json:"node_id,omitempty"`
+ Moniker string `json:"moniker,omitempty"`
+ Network string `json:"network,omitempty"` // chain-id
+
+ // System metrics
+ BinaryVer string `json:"binary_version,omitempty"`
+ MemoryPct float64 `json:"memory_percent,omitempty"`
+ DiskPct float64 `json:"disk_percent,omitempty"`
+
+ // Validator details (when registered). Rewards and commission are
+ // pre-formatted display strings, not raw numeric values.
+ ValidatorStatus string `json:"validator_status,omitempty"`
+ ValidatorMoniker string `json:"validator_moniker,omitempty"`
+ VotingPower int64 `json:"voting_power,omitempty"`
+ VotingPct float64 `json:"voting_percent,omitempty"`
+ Commission string `json:"commission,omitempty"`
+ CommissionRewards string `json:"commission_rewards,omitempty"`
+ OutstandingRewards string `json:"outstanding_rewards,omitempty"`
+ IsJailed bool `json:"is_jailed,omitempty"`
+ JailReason string `json:"jail_reason,omitempty"`
+ JailedUntil string `json:"jailed_until,omitempty"` // RFC3339 timestamp
+ MissedBlocks int64 `json:"missed_blocks,omitempty"`
+ Tombstoned bool `json:"tombstoned,omitempty"`
+
+ // Errors (human-readable; set when an RPC status query fails)
+ Error string `json:"error,omitempty"`
+}
+
+// computeStatus gathers comprehensive status information including system metrics,
+// network details, and validator information.
+//
+// Every network probe is best-effort and individually bounded by a short
+// timeout, so this function stays responsive even when the node, the remote
+// RPC, or the metrics collector hangs; missing data simply leaves the
+// corresponding fields at their zero values.
+//
+// NOTE(review): several single-line `if { ... }` bodies below are not
+// gofmt-formatted; run gofmt over this file.
+func computeStatus(cfg config.Config, sup process.Supervisor) statusResult {
+ res := statusResult{}
+ res.Running = sup.IsRunning()
+ if pid, ok := sup.PID(); ok {
+ res.PID = pid
+ // Try to get system metrics for this process
+ getProcessMetrics(res.PID, &res)
+ }
+
+ // Resolve the RPC URL, defaulting to the conventional local endpoint.
+ rpc := cfg.RPCLocal
+ if rpc == "" { rpc = "http://127.0.0.1:26657" }
+ res.RPCURL = rpc
+ hostport := "127.0.0.1:26657"
+ if u, err := url.Parse(rpc); err == nil && u.Host != "" { hostport = u.Host }
+
+ // Check RPC listening with timeout. The probe has its own 500ms timeout;
+ // the outer 1s context is a safety net in case it blocks longer. The
+ // buffered channel lets the goroutine finish without leaking if we bail.
+ rpcCtx, rpcCancel := context.WithTimeout(context.Background(), 1*time.Second)
+ rpcListeningDone := make(chan bool, 1)
+ go func() {
+ rpcListeningDone <- process.IsRPCListening(hostport, 500*time.Millisecond)
+ }()
+ select {
+ case res.RPCListening = <-rpcListeningDone:
+ // Got response
+ case <-rpcCtx.Done():
+ res.RPCListening = false
+ }
+ rpcCancel()
+
+ if res.RPCListening {
+ cli := node.New(rpc)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ st, err := cli.Status(ctx)
+ if err == nil {
+ res.CatchingUp = st.CatchingUp
+ res.Height = st.Height
+ // Extract node identity from status
+ if st.NodeID != "" { res.NodeID = st.NodeID }
+ if st.Moniker != "" { res.Moniker = st.Moniker }
+ if st.Network != "" { res.Network = st.Network }
+
+ // Fetch comprehensive validator details (best-effort, 3s timeout)
+ valCtx, valCancel := context.WithTimeout(context.Background(), 3*time.Second)
+ myVal, _ := validator.GetCachedMyValidator(valCtx, cfg)
+ valCancel()
+ res.IsValidator = myVal.IsValidator
+ if myVal.IsValidator {
+ res.ValidatorMoniker = myVal.Moniker
+ res.VotingPower = myVal.VotingPower
+ res.VotingPct = myVal.VotingPct
+ res.Commission = myVal.Commission
+ res.ValidatorStatus = myVal.Status
+ res.IsJailed = myVal.Jailed
+ if myVal.SlashingInfo.JailReason != "" {
+ res.JailReason = myVal.SlashingInfo.JailReason
+ }
+
+ // Add detailed jail information
+ if myVal.SlashingInfo.JailedUntil != "" {
+ res.JailedUntil = myVal.SlashingInfo.JailedUntil
+ }
+ if myVal.SlashingInfo.MissedBlocks > 0 {
+ res.MissedBlocks = myVal.SlashingInfo.MissedBlocks
+ }
+ res.Tombstoned = myVal.SlashingInfo.Tombstoned
+
+ // Fetch rewards (best-effort, 2s timeout)
+ rewardCtx, rewardCancel := context.WithTimeout(context.Background(), 2*time.Second)
+ commRewards, outRewards, _ := validator.GetCachedRewards(rewardCtx, cfg, myVal.Address)
+ rewardCancel()
+ res.CommissionRewards = commRewards
+ res.OutstandingRewards = outRewards
+ }
+
+ // Enrich with remote height and peers (best-effort, with strict timeout).
+ // Collect is bounded by ctx2 (1s); the 1.2s select guard below ensures we
+ // never block even if Collect ignores its deadline, and the buffered
+ // channel prevents a goroutine leak when we time out first.
+ remote := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
+ col := metrics.NewWithoutCPU()
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 1000*time.Millisecond)
+ snapChan := make(chan metrics.Snapshot, 1)
+ go func() {
+ snapChan <- col.Collect(ctx2, rpc, remote)
+ }()
+ var snap metrics.Snapshot
+ select {
+ case snap = <-snapChan:
+ // Got response
+ case <-time.After(1200 * time.Millisecond):
+ // Timeout - use empty snapshot
+ }
+ cancel2()
+
+ if snap.Chain.RemoteHeight > 0 {
+ res.RemoteHeight = snap.Chain.RemoteHeight
+ // Calculate sync progress percentage (clamped at 100 in case the
+ // local height briefly exceeds the sampled remote height)
+ if res.Height > 0 && res.RemoteHeight > 0 {
+ pct := float64(res.Height) / float64(res.RemoteHeight) * 100
+ if pct > 100 { pct = 100 }
+ res.SyncProgress = pct
+ }
+ }
+ if snap.Network.Peers > 0 {
+ res.Peers = snap.Network.Peers
+ }
+
+ // Fetch peer list for detailed display (best-effort, 2s timeout)
+ peerCtx, peerCancel := context.WithTimeout(context.Background(), 2*time.Second)
+ peers, _ := cli.Peers(peerCtx)
+ peerCancel()
+ if len(peers) > 0 {
+ for _, p := range peers {
+ res.PeerList = append(res.PeerList, p.ID)
+ }
+ }
+
+ if snap.Network.LatencyMS > 0 { res.LatencyMS = snap.Network.LatencyMS }
+
+ // Capture system metrics
+ if snap.System.MemTotal > 0 {
+ memPct := float64(snap.System.MemUsed) / float64(snap.System.MemTotal)
+ res.MemoryPct = memPct * 100
+ }
+ if snap.System.DiskTotal > 0 {
+ diskPct := float64(snap.System.DiskUsed) / float64(snap.System.DiskTotal)
+ res.DiskPct = diskPct * 100
+ }
+ } else {
+ res.Error = fmt.Sprintf("RPC status error: %v", err)
+ }
+ }
+
+ // Fetch binary version (best-effort)
+ res.BinaryVer = getBinaryVersion(cfg)
+
+ return res
+}
+
+// getProcessMetrics attempts to fetch memory and disk metrics for the
+// process identified by pid, storing them on res.
+//
+// Currently a no-op placeholder: no fields of res are modified. A real
+// implementation would read the proc filesystem or shell out to `ps`
+// (e.g. `ps -p <pid> -o %mem=`); it is left empty for now to avoid
+// external dependencies.
+func getProcessMetrics(pid int, res *statusResult) {
+ // This is a best-effort attempt - we'll try to get these metrics if possible
+ // For now, we set defaults. In production, you'd use process libraries or proc filesystem
+ // Try using `ps` command to get memory usage
+ // Example: ps -p -o %mem= gives percentage of memory
+ // This is simplified for now to avoid external dependencies
+}
+
+// getBinaryVersion fetches the binary version string from pchaind.
+//
+// It runs `pchaind version --long` with a 2-second timeout and returns the
+// value of the "version:" line, or "" on any failure (binary not found,
+// timeout, or unexpected output) so callers can treat the version as
+// best-effort.
+//
+// NOTE(review): cfg is currently unused — the binary name is hard-coded;
+// the parameter is kept for interface stability and future use (e.g.
+// resolving the binary path from configuration).
+func getBinaryVersion(cfg config.Config) string {
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "pchaind", "version", "--long")
+	output, err := cmd.Output()
+	if err != nil {
+		return ""
+	}
+
+	// Parse version from output; the line of interest looks like
+	// "version: v0.x.x-...". Compare the exact key so sibling fields such
+	// as "version_something" cannot be mistaken for it.
+	for _, line := range strings.Split(string(output), "\n") {
+		key, val, found := strings.Cut(line, ":")
+		if found && strings.TrimSpace(key) == "version" {
+			return strings.TrimSpace(val)
+		}
+	}
+
+	return ""
+}
+
+// printStatusText prints a human-friendly status summary matching the dashboard layout.
+func printStatusText(result statusResult) {
+ c := ui.NewColorConfig()
+
+ // Build icon/status strings
+ nodeIcon := c.StatusIcon("stopped")
+ nodeVal := "Stopped"
+ if result.Running {
+ nodeIcon = c.StatusIcon("running")
+ if result.PID != 0 {
+ nodeVal = fmt.Sprintf("Running (pid %d)", result.PID)
+ } else {
+ nodeVal = "Running"
+ }
+ }
+
+ rpcIcon := c.StatusIcon("offline")
+ rpcVal := "Not listening"
+ if result.RPCListening {
+ rpcIcon = c.StatusIcon("online")
+ rpcVal = "Listening"
+ }
+
+ syncIcon := c.StatusIcon("offline")
+ syncVal := "N/A"
+ if result.RPCListening {
+ if result.CatchingUp {
+ syncIcon = c.StatusIcon("syncing")
+ syncVal = "Catching Up"
+ } else {
+ syncIcon = c.StatusIcon("success")
+ syncVal = "In Sync"
+ }
+ }
+
+ validatorIcon := c.StatusIcon("offline")
+ validatorVal := "Not Registered"
+ if result.IsValidator {
+ validatorIcon = c.StatusIcon("online")
+ validatorVal = "Registered"
+ }
+
+ heightVal := ui.FormatNumber(result.Height)
+ if result.Error != "" {
+ heightVal = c.Error(result.Error)
+ }
+
+ peers := "0 peers"
+ if result.Peers == 1 {
+ peers = "1 peer"
+ } else if result.Peers > 1 {
+ peers = fmt.Sprintf("%d peers", result.Peers)
+ }
+
+ // Define box styling (enhanced layout with wider boxes)
+ boxStyle := lipgloss.NewStyle().
+ Border(lipgloss.RoundedBorder()).
+ BorderForeground(lipgloss.Color("63")).
+ Padding(0, 1).
+ Width(80)
+
+ titleStyle := lipgloss.NewStyle().
+ Bold(true).
+ Foreground(lipgloss.Color("39")). // Bright cyan
+ Width(76).
+ Align(lipgloss.Center)
+
+ // Build NODE STATUS box - Enhanced with system metrics and version
+ nodeLines := []string{
+ fmt.Sprintf("%s %s", nodeIcon, nodeVal),
+ fmt.Sprintf("%s %s", rpcIcon, rpcVal),
+ }
+ if result.MemoryPct > 0 {
+ nodeLines = append(nodeLines, fmt.Sprintf(" Memory: %.1f%%", result.MemoryPct))
+ }
+ if result.DiskPct > 0 {
+ nodeLines = append(nodeLines, fmt.Sprintf(" Disk: %.1f%%", result.DiskPct))
+ }
+ if result.BinaryVer != "" {
+ nodeLines = append(nodeLines, fmt.Sprintf(" Version: %s", result.BinaryVer))
+ }
+ nodeBox := boxStyle.Render(
+ titleStyle.Render("NODE STATUS") + "\n" + strings.Join(nodeLines, "\n"),
+ )
+
+ // Build CHAIN STATUS box - Dashboard-style with progress bar and block counts
+ chainLines := []string{}
+
+ if result.RPCListening && result.RemoteHeight > 0 {
+ // Use dashboard-style progress rendering with block counts
+ syncLine := renderSyncProgressDashboard(result.Height, result.RemoteHeight, result.CatchingUp)
+ chainLines = append(chainLines, syncLine)
+ } else {
+ // Fallback to simple format if RPC not available
+ chainLines = append(chainLines, fmt.Sprintf("%s %s", syncIcon, syncVal))
+ if result.Height > 0 {
+ chainLines = append(chainLines, fmt.Sprintf("Height: %s", heightVal))
+ }
+ }
+
+ chainBox := boxStyle.Render(
+ titleStyle.Render("CHAIN STATUS") + "\n" + strings.Join(chainLines, "\n"),
+ )
+
+ // Top row: NODE STATUS | CHAIN STATUS
+ topRow := lipgloss.JoinHorizontal(lipgloss.Top, nodeBox, chainBox)
+
+ // Build NETWORK STATUS box - Enhanced with full peer list
+ networkLines := []string{}
+
+ if len(result.PeerList) > 0 {
+ networkLines = append(networkLines, fmt.Sprintf("Connected to %d peers (Node ID):", len(result.PeerList)))
+ maxDisplay := 3 // Show first 3 peers like dashboard
+ for i, peer := range result.PeerList {
+ if i >= maxDisplay {
+ networkLines = append(networkLines, fmt.Sprintf(" ... and %d more", len(result.PeerList)-maxDisplay))
+ break
+ }
+ networkLines = append(networkLines, fmt.Sprintf(" %s", peer))
+ }
+ } else {
+ networkLines = append(networkLines, fmt.Sprintf("%s %s", c.Info("โข"), peers))
+ }
+
+ if result.LatencyMS > 0 {
+ networkLines = append(networkLines, fmt.Sprintf("Latency: %dms", result.LatencyMS))
+ }
+ if result.Network != "" {
+ networkLines = append(networkLines, fmt.Sprintf("Chain: %s", result.Network))
+ }
+ if result.NodeID != "" {
+ networkLines = append(networkLines, fmt.Sprintf("Node ID: %s", result.NodeID))
+ }
+ if result.Moniker != "" {
+ networkLines = append(networkLines, fmt.Sprintf("Name: %s", result.Moniker))
+ }
+
+ networkBox := boxStyle.Render(
+ titleStyle.Render("NETWORK STATUS") + "\n" + strings.Join(networkLines, "\n"),
+ )
+
+ // Build VALIDATOR STATUS box - Enhanced with two-column layout when jailed
+ var validatorBoxContent string
+
+ if result.IsValidator && result.IsJailed {
+ // Two-column layout for jailed validators (matching dashboard)
+
+ // LEFT column: Basic validator info and rewards
+ leftLines := []string{
+ fmt.Sprintf("%s %s", validatorIcon, validatorVal),
+ }
+
+ if result.ValidatorMoniker != "" {
+ leftLines = append(leftLines, fmt.Sprintf(" Moniker: %s", result.ValidatorMoniker))
+ }
+
+ // Show basic status on left
+ if result.ValidatorStatus != "" {
+ leftLines = append(leftLines, fmt.Sprintf(" โ
Status: %s", result.ValidatorStatus))
+ }
+
+ if result.VotingPower > 0 {
+ vpStr := ui.FormatNumber(result.VotingPower)
+ if result.VotingPct > 0 {
+ vpStr += fmt.Sprintf(" (%.3f%%)", result.VotingPct*100)
+ }
+ leftLines = append(leftLines, fmt.Sprintf(" Power: %s", vpStr))
+ }
+
+ if result.Commission != "" {
+ leftLines = append(leftLines, fmt.Sprintf(" Commission: %s", result.Commission))
+ }
+
+ // Show rewards if available
+ hasCommRewards := result.CommissionRewards != "" && result.CommissionRewards != "โ" && result.CommissionRewards != "0"
+ hasOutRewards := result.OutstandingRewards != "" && result.OutstandingRewards != "โ" && result.OutstandingRewards != "0"
+
+ if hasCommRewards || hasOutRewards {
+ // Add reward amounts first
+ if hasCommRewards {
+ leftLines = append(leftLines, fmt.Sprintf(" Comm Rewards: %s", result.CommissionRewards))
+ }
+ if hasOutRewards {
+ leftLines = append(leftLines, fmt.Sprintf(" Outstanding Rewards: %s", result.OutstandingRewards))
+ }
+
+ leftLines = append(leftLines, "")
+ // Create command style for colored output
+ commandStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true)
+ leftLines = append(leftLines, fmt.Sprintf(" %s %s", c.StatusIcon("online"), commandStyle.Render("Rewards available!")))
+ leftLines = append(leftLines, commandStyle.Render(" Run: push-validator restake"))
+ leftLines = append(leftLines, commandStyle.Render(" Run: push-validator withdraw-rewards"))
+ }
+
+ // RIGHT column: Status details
+ rightLines := []string{
+ "STATUS DETAILS",
+ }
+ rightLines = append(rightLines, "")
+
+ // Show status with jail indicator on right
+ statusText := fmt.Sprintf("%s (JAILED)", result.ValidatorStatus)
+ rightLines = append(rightLines, statusText)
+ rightLines = append(rightLines, "")
+
+ if result.JailReason != "" {
+ rightLines = append(rightLines, fmt.Sprintf(" Reason: %s", result.JailReason))
+ }
+
+ // Add missed blocks if available
+ if result.MissedBlocks > 0 {
+ rightLines = append(rightLines, fmt.Sprintf(" Missed: %s blks", ui.FormatNumber(result.MissedBlocks)))
+ }
+
+ // Add tombstoned status if applicable
+ if result.Tombstoned {
+ rightLines = append(rightLines, fmt.Sprintf(" %s Tombstoned: Yes", c.StatusIcon("offline")))
+ }
+
+ // Add jail until time if available
+ if result.JailedUntil != "" {
+ formatted := formatTimestamp(result.JailedUntil)
+ if formatted != "" {
+ rightLines = append(rightLines, fmt.Sprintf(" Until: %s", formatted))
+ }
+
+ // Add time remaining if applicable
+ remaining := timeUntil(result.JailedUntil)
+ if remaining != "" && remaining != "0s" {
+ rightLines = append(rightLines, fmt.Sprintf(" Remaining: %s", remaining))
+ } else if remaining == "0s" || remaining == "" {
+ rightLines = append(rightLines, fmt.Sprintf(" Remaining: 0s (Ready"))
+ rightLines = append(rightLines, fmt.Sprintf(" now!)"))
+ }
+ }
+
+ // Show unjail information
+ rightLines = append(rightLines, "")
+ // Create command style for colored output
+ commandStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true)
+ rightLines = append(rightLines, fmt.Sprintf(" %s %s", c.StatusIcon("online"), commandStyle.Render("Ready to unjail!")))
+ rightLines = append(rightLines, commandStyle.Render(" Run: push-validator unjail"))
+
+ // Build two-column layout
+ leftContent := strings.Join(leftLines, "\n")
+ rightContent := strings.Join(rightLines, "\n")
+
+ // Calculate column widths: assume box is ~78 chars wide (80 - 2 borders)
+ // Split roughly in half with 2-char spacing between
+ const boxInnerWidth = 78
+ leftWidth := (boxInnerWidth / 2) - 1 // ~38 chars
+ rightWidth := boxInnerWidth - leftWidth - 2 // ~38 chars with 2-space separator
+
+ // Use lipgloss to join columns horizontally
+ leftStyle := lipgloss.NewStyle().Width(leftWidth)
+ rightStyle := lipgloss.NewStyle().Width(rightWidth)
+
+ leftRendered := leftStyle.Render(leftContent)
+ rightRendered := rightStyle.Render(rightContent)
+
+ validatorBoxContent = titleStyle.Render("MY VALIDATOR STATUS") + "\n" +
+ lipgloss.JoinHorizontal(lipgloss.Top, leftRendered, " ", rightRendered)
+ } else {
+ // Single column layout for non-jailed or non-registered validators
+ validatorLines := []string{
+ fmt.Sprintf("%s %s", validatorIcon, validatorVal),
+ }
+
+ if result.IsValidator {
+ if result.ValidatorMoniker != "" {
+ validatorLines = append(validatorLines, fmt.Sprintf(" Moniker: %s", result.ValidatorMoniker))
+ }
+
+ // Show validator status with jail indicator
+ if result.ValidatorStatus != "" {
+ statusText := result.ValidatorStatus
+ if result.IsJailed {
+ statusText = fmt.Sprintf("%s (JAILED)", result.ValidatorStatus)
+ }
+ validatorLines = append(validatorLines, fmt.Sprintf(" Status: %s", statusText))
+ }
+
+ if result.VotingPower > 0 {
+ vpStr := ui.FormatNumber(result.VotingPower)
+ if result.VotingPct > 0 {
+ vpStr += fmt.Sprintf(" (%.3f%%)", result.VotingPct*100)
+ }
+ validatorLines = append(validatorLines, fmt.Sprintf(" Power: %s", vpStr))
+ }
+
+ if result.Commission != "" {
+ validatorLines = append(validatorLines, fmt.Sprintf(" Commission: %s", result.Commission))
+ }
+
+ // Show rewards if available
+ hasCommRewards := result.CommissionRewards != "" && result.CommissionRewards != "โ" && result.CommissionRewards != "0"
+ hasOutRewards := result.OutstandingRewards != "" && result.OutstandingRewards != "โ" && result.OutstandingRewards != "0"
+
+ if hasCommRewards || hasOutRewards {
+ // Add reward amounts first
+ if hasCommRewards {
+ validatorLines = append(validatorLines, fmt.Sprintf(" Comm Rewards: %s PC", result.CommissionRewards))
+ }
+ if hasOutRewards {
+ validatorLines = append(validatorLines, fmt.Sprintf(" Outstanding Rewards: %s PC", result.OutstandingRewards))
+ }
+
+ validatorLines = append(validatorLines, "")
+ // Create command style for colored output
+ commandStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true)
+ validatorLines = append(validatorLines, fmt.Sprintf(" %s %s", c.StatusIcon("online"), commandStyle.Render("Rewards available!")))
+ validatorLines = append(validatorLines, commandStyle.Render(" Run: push-validator restake"))
+ validatorLines = append(validatorLines, commandStyle.Render(" Run: push-validator withdraw-rewards"))
+ }
+ }
+
+ validatorBoxContent = titleStyle.Render("MY VALIDATOR STATUS") + "\n" + strings.Join(validatorLines, "\n")
+ }
+
+ validatorBox := boxStyle.Render(validatorBoxContent)
+
+ // Bottom row: NETWORK STATUS | VALIDATOR STATUS
+ bottomRow := lipgloss.JoinHorizontal(lipgloss.Top, networkBox, validatorBox)
+
+ // Combine top and bottom rows
+ output := lipgloss.JoinVertical(lipgloss.Left, topRow, bottomRow)
+
+ fmt.Println(output)
+
+ // Add hint when no peers connected
+ if result.Peers == 0 && result.Running && result.RPCListening {
+ fmt.Printf("\n%s Check connectivity: push-validator doctor\n", c.Info("โน"))
+ }
+}
+
// truncateNodeID shortens a long node ID for display.
// IDs of 16 characters or fewer are returned unchanged; longer IDs keep
// their first and last 8 characters around an ellipsis.
func truncateNodeID(nodeID string) string {
	const keep = 8
	if len(nodeID) <= 2*keep {
		return nodeID
	}
	head, tail := nodeID[:keep], nodeID[len(nodeID)-keep:]
	return head + "..." + tail
}
+
// renderProgressBar creates a visual progress bar using block characters.
// percent is clamped into [0, 100] and width is the number of bar cells;
// the rendered form is "[<bar>] NN.NN%".
//
// Fix: a NaN percent previously slipped past both ordered clamps; the
// int conversion of NaN is unspecified in Go and a negative cell count
// makes strings.Repeat panic. A negative width had the same panic risk.
func renderProgressBar(percent float64, width int) string {
	// `!(percent >= 0)` is true for negatives AND for NaN, so non-finite
	// garbage degrades to an empty bar instead of panicking.
	if !(percent >= 0) {
		percent = 0
	}
	if percent > 100 {
		percent = 100
	}
	// Guard: strings.Repeat panics on a negative count.
	if width < 0 {
		width = 0
	}

	filled := int(float64(width) * (percent / 100))
	empty := width - filled

	bar := strings.Repeat("โ", filled) + strings.Repeat("โ", empty)
	return fmt.Sprintf("[%s] %.2f%%", bar, percent)
}
+
// formatWithCommas adds comma separators to large numbers, e.g.
// 1234567 -> "1,234,567".
//
// Fixes over the previous version:
//   - negative values are now grouped too (the old `n < 1000` guard made
//     every negative number short-circuit, so -1234567 printed ungrouped);
//   - strings.Builder replaces quadratic `+=` concatenation.
func formatWithCommas(n int64) string {
	s := fmt.Sprintf("%d", n)
	// Peel off the sign so grouping operates on digits only.
	sign := ""
	if strings.HasPrefix(s, "-") {
		sign, s = "-", s[1:]
	}
	if len(s) <= 3 {
		return sign + s
	}
	var b strings.Builder
	b.Grow(len(sign) + len(s) + len(s)/3)
	b.WriteString(sign)
	for i := 0; i < len(s); i++ {
		// Insert a comma whenever a multiple of three digits remains.
		if i > 0 && (len(s)-i)%3 == 0 {
			b.WriteByte(',')
		}
		b.WriteByte(s[i])
	}
	return b.String()
}
+
// formatTimestamp converts an RFC3339 timestamp into the local-time
// display form "Jan 02, 03:04 PM MST". Empty or unparseable input
// yields the empty string.
func formatTimestamp(rfcTime string) string {
	if rfcTime == "" {
		return ""
	}
	parsed, err := time.Parse(time.RFC3339Nano, rfcTime)
	if err != nil {
		return ""
	}
	const layout = "Jan 02, 03:04 PM MST"
	return parsed.Local().Format(layout)
}
+
+// timeUntil calculates human-readable time remaining until a given RFC3339 timestamp
+func timeUntil(rfcTime string) string {
+ if rfcTime == "" {
+ return ""
+ }
+ t, err := time.Parse(time.RFC3339Nano, rfcTime)
+ if err != nil {
+ return ""
+ }
+ remaining := time.Until(t)
+ if remaining <= 0 {
+ return "0s"
+ }
+ return durationShort(remaining)
+}
+
+// durationShort formats duration concisely (e.g., "2h30m", "45s")
+func durationShort(d time.Duration) string {
+ if d < time.Minute {
+ return fmt.Sprintf("%ds", int(d.Seconds()))
+ }
+ if d < time.Hour {
+ return fmt.Sprintf("%dm", int(d.Minutes()))
+ }
+ if d < 24*time.Hour {
+ h := int(d.Hours())
+ m := int(d.Minutes()) % 60
+ if m == 0 {
+ return fmt.Sprintf("%dh", h)
+ }
+ return fmt.Sprintf("%dh%dm", h, m)
+ }
+ days := int(d.Hours()) / 24
+ h := int(d.Hours()) % 24
+ if h == 0 {
+ return fmt.Sprintf("%dd", days)
+ }
+ return fmt.Sprintf("%dd%dh", days, h)
+}
+
// renderSyncProgressDashboard creates a dashboard-style sync progress
// line: status label, a 28-cell colored bar, percentage, block counts
// and an ETA estimate. local and remote are block heights; a
// non-positive remote means there is nothing to compare against, so ""
// is returned.
func renderSyncProgressDashboard(local, remote int64, isCatchingUp bool) string {
	if remote <= 0 {
		return ""
	}

	// Fraction of the remote height reached, clamped to [0, 100].
	percent := float64(local) / float64(remote) * 100
	if percent < 0 {
		percent = 0
	}
	if percent > 100 {
		percent = 100
	}

	// Fixed bar width; clamp the filled cell count defensively so the
	// strings.Repeat calls below can never receive a negative count.
	width := 28
	filled := int(percent / 100 * float64(width))
	if filled < 0 {
		filled = 0
	}
	if filled > width {
		filled = width
	}

	// Create colored progress bar: green cells mark the synced portion,
	// grey cells the remainder (lipgloss ANSI color codes 10 and 240).
	greenBar := lipgloss.NewStyle().Foreground(lipgloss.Color("10")).Render(strings.Repeat("โ", filled))
	greyBar := lipgloss.NewStyle().Foreground(lipgloss.Color("240")).Render(strings.Repeat("โ", width-filled))
	bar := greenBar + greyBar

	// Status label
	icon := "๐ Syncing"
	if !isCatchingUp {
		icon = "๐ In Sync"
	}

	result := fmt.Sprintf("%s [%s] %.2f%% | %s/%s blocks",
		icon, bar, percent,
		formatWithCommas(local),
		formatWithCommas(remote))

	// Add ETA if syncing
	if isCatchingUp && remote > local {
		blocksBehind := remote - local
		// Assume average block time of ~6 seconds (adjust if needed)
		eta := blocksBehind * 6
		result += fmt.Sprintf(" | ETA: %s", durationShort(time.Duration(eta)*time.Second))
	} else if remote > 0 {
		// In sync. (remote > 0 always holds here because of the early
		// return above, so this branch is taken whenever not catching up.)
		result += " | ETA: 0s"
	}

	return result
}
diff --git a/push-validator-manager/cmd/push-validator/cmd_sync.go b/push-validator-manager/cmd/push-validator/cmd_sync.go
new file mode 100644
index 00000000..d5dee0f4
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_sync.go
@@ -0,0 +1,81 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/exitcodes"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ syncmon "github.com/pushchain/push-chain-node/push-validator-manager/internal/sync"
+)
+
// init registers the `sync` subcommand, which watches the local node
// catch up to the remote head and exits once it is fully synced.
func init() {
	// Flag storage, scoped to this init so the command is self-contained.
	var syncCompact bool
	var syncWindow int
	var syncRPC string
	var syncRemote string
	var syncSkipFinal bool
	var syncInterval time.Duration
	var syncStuckTimeout time.Duration

	syncCmd := &cobra.Command{
		Use:   "sync",
		Short: "Monitor sync progress",
		RunE: func(cmd *cobra.Command, args []string) error {
			cfg := loadCfg()
			// Default the local RPC endpoint from config when not given.
			if syncRPC == "" {
				syncRPC = cfg.RPCLocal
			}
			// Default the remote comparison endpoint to the genesis domain
			// over HTTPS on the standard port.
			if syncRemote == "" {
				syncRemote = "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
			}
			sup := process.New(cfg.HomeDir)
			// A non-positive flag value means "unset": fall back to the
			// PNM_SYNC_STUCK_TIMEOUT environment variable if it parses.
			if syncStuckTimeout <= 0 {
				if envTimeout := os.Getenv("PNM_SYNC_STUCK_TIMEOUT"); envTimeout != "" {
					if parsed, err := time.ParseDuration(envTimeout); err == nil {
						syncStuckTimeout = parsed
					}
				}
			}
			if err := syncmon.Run(cmd.Context(), syncmon.Options{
				LocalRPC:     syncRPC,
				RemoteRPC:    syncRemote,
				LogPath:      sup.LogPath(),
				Window:       syncWindow,
				Compact:      syncCompact,
				Out:          os.Stdout,
				Interval:     syncInterval,
				Quiet:        flagQuiet,
				Debug:        flagDebug,
				StuckTimeout: syncStuckTimeout,
			}); err != nil {
				// Map the stuck-sync sentinel to its dedicated exit code;
				// all other monitor errors propagate unchanged.
				if errors.Is(err, syncmon.ErrSyncStuck) {
					return exitcodes.NewError(exitcodes.SyncStuck, err.Error())
				}
				return err
			}
			// Completion message, suppressible for automation.
			if !syncSkipFinal {
				out := cmd.OutOrStdout()
				if flagQuiet {
					fmt.Fprintln(out, " Sync complete.")
				} else {
					fmt.Fprintln(out, " โ Sync complete! Node is fully synced.")
				}
			}
			return nil
		},
	}
	syncCmd.Flags().BoolVar(&syncCompact, "compact", false, "Compact output")
	syncCmd.Flags().IntVar(&syncWindow, "window", 30, "Moving average window (headers)")
	syncCmd.Flags().StringVar(&syncRPC, "rpc", "", "Local RPC base (http[s]://host:port)")
	syncCmd.Flags().StringVar(&syncRemote, "remote", "", "Remote RPC base")
	syncCmd.Flags().DurationVar(&syncInterval, "interval", 120*time.Millisecond, "Update interval (e.g. 1s, 2s)")
	syncCmd.Flags().BoolVar(&syncSkipFinal, "skip-final-message", false, "Suppress completion message (for automation)")
	syncCmd.Flags().DurationVar(&syncStuckTimeout, "stuck-timeout", 0, "Stuck detection timeout (e.g. 2m, 5m). 0 uses default or PNM_SYNC_STUCK_TIMEOUT")
	rootCmd.AddCommand(syncCmd)
}
diff --git a/push-validator-manager/cmd/push-validator/cmd_unjail.go b/push-validator-manager/cmd/push-validator/cmd_unjail.go
new file mode 100644
index 00000000..ab948822
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_unjail.go
@@ -0,0 +1,336 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+ "golang.org/x/term"
+)
+
// handleUnjail orchestrates the validator unjail flow:
// - verify node is synced
// - verify validator is jailed with expired jail period
// - prompt for key name
// - submit unjail transaction
// - display results
//
// Every failure path prints a diagnostic (or emits JSON when
// --output=json) and returns; the function reports nothing to callers.
func handleUnjail(cfg config.Config) {
	p := ui.NewPrinter(flagOutput)

	// Step 1: Check sync status
	if flagOutput != "json" {
		fmt.Println()
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking node sync status..."))
	}

	local := strings.TrimRight(cfg.RPCLocal, "/")
	if local == "" {
		local = "http://127.0.0.1:26657"
	}
	remoteHTTP := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
	cliLocal := node.New(local)
	cliRemote := node.New(remoteHTTP)

	// Both status probes share one short 3s context so a dead endpoint
	// cannot stall the CLI.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	stLocal, err1 := cliLocal.Status(ctx)
	_, err2 := cliRemote.RemoteStatus(ctx, remoteHTTP)
	cancel()

	if err1 != nil || err2 != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check sync status"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to check sync status"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Please verify your node is running and properly configured."))
			fmt.Println()
		}
		return
	}

	// Unjailing from a node that is behind the chain head would submit a
	// transaction with stale state; require a completed sync first.
	if stLocal.CatchingUp {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "node is still syncing"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Warning("โ ๏ธ Node is still syncing to latest block"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Please wait for sync to complete before unjailing."))
			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator sync"))
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Step 2: Check validator jail status
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking validator jail status..."))
	}

	ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
	myVal, statusErr := validator.GetCachedMyValidator(ctx2, cfg)
	cancel2()

	if statusErr != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check validator status"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to check validator status"))
			fmt.Println()
		}
		return
	}

	if !myVal.IsValidator {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "node is not registered as validator"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Warning("โ ๏ธ This node is not registered as a validator"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Register first using:"))
			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "  push-validator register-validator"))
			fmt.Println()
		}
		return
	}

	// An active validator has nothing to unjail; report and exit cleanly.
	if !myVal.Jailed {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "validator is not jailed"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Success("โ Validator is active (not jailed)"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Status: " + myVal.Status))
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Step 3: Check if jail period has expired
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking jail expiry..."))
	}

	jailedUntil := myVal.SlashingInfo.JailedUntil
	if jailedUntil == "" {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "could not determine jail period"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Could not determine jail period"))
			fmt.Println()
		}
		return
	}

	// Check if jail time has passed; submitting early would just burn gas
	// on a guaranteed-to-fail transaction.
	if !isJailPeriodExpired(jailedUntil) {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "jail period has not expired", "jailed_until": jailedUntil})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Warning("โ ๏ธ Jail period has not expired yet"))
			fmt.Println()
			fmt.Printf("Jailed until: %s\n", jailedUntil)
			fmt.Println()
			fmt.Println(p.Colors.Info("Please wait until the jail period expires before attempting to unjail."))
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Step 4: Auto-derive key name from validator
	defaultKeyName := getenvDefault("KEY_NAME", "validator-key")
	var keyName string

	// Try to auto-derive the key name from the validator's address
	if myVal.Address != "" {
		// Convert validator address to account address
		accountAddr, convErr := convertValidatorToAccountAddress(myVal.Address)
		if convErr == nil {
			// Try to find the key in the keyring
			if foundKey, findErr := findKeyNameByAddress(cfg, accountAddr); findErr == nil {
				keyName = foundKey
				if flagOutput != "json" {
					fmt.Println()
					fmt.Printf("๐ Using key: %s\n", keyName)
				}
			} else {
				// Fall back to default if key not found
				keyName = defaultKeyName
			}
		} else {
			// Fall back to default if address conversion failed
			keyName = defaultKeyName
		}
	} else {
		keyName = defaultKeyName
	}

	// Only prompt if explicitly requested via env or interactive mode AND key derivation failed
	if flagOutput != "json" && !flagNonInteractive && keyName == defaultKeyName && os.Getenv("KEY_NAME") == "" {
		// Interactive prompt for key name. If stdin is not a TTY (e.g.
		// the command was piped), reattach to /dev/tty so the user can
		// still answer; os.Stdin is restored via the deferred cleanup.
		savedStdin := os.Stdin
		var tty *os.File
		if !term.IsTerminal(int(savedStdin.Fd())) {
			if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
				tty = t
				os.Stdin = t
			}
		}
		if tty != nil {
			defer func() {
				os.Stdin = savedStdin
				tty.Close()
			}()
		}

		reader := bufio.NewReader(os.Stdin)
		fmt.Printf("\nEnter key name for unjailing [%s]: ", defaultKeyName)
		input, _ := reader.ReadString('\n')
		input = strings.TrimSpace(input)
		if input != "" {
			keyName = input
		} else {
			keyName = defaultKeyName
		}
		fmt.Println()
	}

	// Step 5: Check balance for gas fees
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ฐ Checking wallet balance for gas fees..."))
	}

	// Convert validator address to account address for balance check
	accountAddr, addrErr := convertValidatorToAccountAddress(myVal.Address)
	if addrErr != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to derive account address"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to derive account address"))
			fmt.Println()
		}
		return
	}

	// Get EVM address for display
	evmAddr, evmErr := getEVMAddress(accountAddr)
	if evmErr != nil {
		evmAddr = "" // Not critical, we can proceed without EVM address
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Wait for sufficient balance (only in interactive mode)
	if flagOutput != "json" && !flagNonInteractive {
		const requiredForGasFees = "150000000000000000" // 0.15 PC in micro-units, enough for gas (actual: ~0.1037 PC + 1.45x buffer)
		if !waitForSufficientBalance(cfg, accountAddr, evmAddr, requiredForGasFees, "unjail") {
			return
		}
	}

	// Step 6: Submit unjail transaction
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ค Submitting unjail transaction..."))
	}

	v := validator.NewWith(validator.Options{
		BinPath:       findPchaind(),
		HomeDir:       cfg.HomeDir,
		ChainID:       cfg.ChainID,
		Keyring:       cfg.KeyringBackend,
		GenesisDomain: cfg.GenesisDomain,
		Denom:         cfg.Denom,
	})

	// Generous 90s budget: broadcast plus waiting for tx inclusion.
	ctx3, cancel3 := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel3()

	txHash, err := v.Unjail(ctx3, keyName)
	if err != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Unjail transaction failed"))
			fmt.Println()
			fmt.Printf("Error: %v\n", err)
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Success output
	if flagOutput == "json" {
		getPrinter().JSON(map[string]any{"ok": true, "txhash": txHash})
	} else {
		fmt.Println()
		p.Success("โ
 Validator successfully unjailed!")
		fmt.Println()

		// Display transaction hash
		p.KeyValueLine("Transaction Hash", txHash, "green")
		fmt.Println()

		// Show helpful next steps
		fmt.Println(p.Colors.SubHeader("Next Steps"))
		fmt.Println(p.Colors.Separator(40))
		fmt.Println()
		fmt.Println(p.Colors.Info("  1. Check validator status:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "     push-validator validators"))
		fmt.Println()
		fmt.Println(p.Colors.Info("  2. Monitor node status:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "     push-validator status"))
		fmt.Println()
		fmt.Println(p.Colors.Info("  3. Live dashboard:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, "     push-validator dashboard"))
		fmt.Println()
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, "  Your validator will resume block signing and earning rewards."))
		fmt.Println()
	}
}
+
// isJailPeriodExpired reports whether the validator's jail window has
// elapsed. An empty timestamp or the Unix-epoch sentinel counts as
// expired; an unparseable timestamp is treated as still jailed
// (conservative: we refuse to unjail rather than submit a doomed tx).
func isJailPeriodExpired(jailedUntil string) bool {
	switch jailedUntil {
	case "", "1970-01-01T00:00:00Z":
		// No jail time recorded means there is nothing to wait out.
		return true
	}

	until, err := time.Parse(time.RFC3339Nano, jailedUntil)
	if err != nil {
		return false
	}
	return time.Now().After(until)
}
diff --git a/push-validator-manager/cmd/push-validator/cmd_update.go b/push-validator-manager/cmd/push-validator/cmd_update.go
new file mode 100644
index 00000000..5c6381a6
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_update.go
@@ -0,0 +1,190 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/exitcodes"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/spf13/cobra"
+)
+
// Flag storage for the `update` command.
var (
	updateBranch string // --branch: branch or tag to update to
	updateForce  bool   // --force: skip confirmation prompts
)

// updateCmd rebuilds the manager and chain binary from the source
// repository. It is hidden because it only applies to source installs.
var updateCmd = &cobra.Command{
	Use:    "update",
	Short:  "Update validator manager and chain binary",
	Hidden: true,
	Long: `Updates the Push Validator Manager and pchaind binary to the latest version.

This command:
1. Checks current versions
2. Pulls latest code from the repository
3. Rebuilds binaries
4. Restarts the node if it was running

Use --branch to update to a specific branch or tag.
Use --force to skip confirmation prompts.`,
	RunE: runUpdate,
}
+
+func runUpdate(cmd *cobra.Command, args []string) error {
+ p := ui.NewPrinterFromGlobal(flagOutput)
+ c := p.Colors
+
+ // Check if we're in a git repository or if we can find the repo
+ execPath, err := os.Executable()
+ if err != nil {
+ return exitcodes.PreconditionErrorf("cannot determine executable path: %v", err)
+ }
+
+ // Try to find the repository root
+ repoDir := findRepoRoot(filepath.Dir(execPath))
+ if repoDir == "" {
+ p.Warn("Cannot find repository. The update command requires installation from source.")
+ p.Info("To update:")
+ p.Info("1. Clone/pull the repository: https://github.com/pushchain/push-chain-node")
+ p.Info("2. Run: bash push-validator/install.sh --use-local")
+ return exitcodes.PreconditionError("not installed from repository")
+ }
+
+ fmt.Println(c.Header(" VALIDATOR UPDATE "))
+ fmt.Println()
+
+ // Check current version/commit
+ currentCommit := getGitCommit(repoDir)
+ if currentCommit != "" {
+ p.Info(fmt.Sprintf("Current version: %s", currentCommit[:8]))
+ }
+
+ // Fetch latest changes
+ p.Info("Fetching latest changes...")
+ branch := updateBranch
+ if branch == "" {
+ branch = "feature/pnm" // default branch
+ }
+
+ if err := gitFetch(repoDir); err != nil {
+ return exitcodes.NetworkErrf("failed to fetch updates: %v", err)
+ }
+
+ // Check if update available
+ latestCommit := getGitCommit(repoDir)
+ if currentCommit == latestCommit && !updateForce {
+ p.Success("Already up to date!")
+ return nil
+ }
+
+ // Show what will be updated
+ if !updateForce && !flagYes {
+ // Check if non-interactive mode is enabled without --yes
+ if flagNonInteractive {
+ return exitcodes.PreconditionError("update requires confirmation: use --yes or --force in non-interactive mode")
+ }
+
+ fmt.Println()
+ p.Warn(fmt.Sprintf("This will update to branch '%s'", branch))
+ p.Info("The node will be stopped and restarted if running")
+ fmt.Print("\nContinue? (y/N): ")
+
+ var response string
+ fmt.Scanln(&response)
+ if strings.ToLower(response) != "y" && strings.ToLower(response) != "yes" {
+ p.Info("Update cancelled")
+ return nil
+ }
+ }
+
+ fmt.Println()
+ p.Info("Updating repository...")
+
+ // Pull latest changes
+ if err := gitPull(repoDir, branch); err != nil {
+ return exitcodes.NetworkErrf("failed to pull updates: %v", err)
+ }
+
+ p.Success("Updated to latest commit")
+
+ // Rebuild binaries
+ p.Info("Rebuilding binaries...")
+
+ installScript := filepath.Join(repoDir, "push-validator", "install.sh")
+ if _, err := os.Stat(installScript); os.IsNotExist(err) {
+ return exitcodes.PreconditionErrorf("install script not found at %s", installScript)
+ }
+
+ // Run install script with --use-local --no-reset
+ installCmd := exec.Command("bash", installScript, "--use-local", "--no-reset", "--no-start")
+ installCmd.Dir = filepath.Dir(installScript)
+ installCmd.Stdout = os.Stdout
+ installCmd.Stderr = os.Stderr
+
+ if err := installCmd.Run(); err != nil {
+ return exitcodes.ProcessErrf("failed to rebuild: %v", err)
+ }
+
+ fmt.Println()
+ p.Success("Update completed successfully!")
+ p.Info("Restart the node with: push-validator restart")
+
+ return nil
+}
+
+func findRepoRoot(startDir string) string {
+ dir := startDir
+ for i := 0; i < 10; i++ { // limit search depth
+ gitDir := filepath.Join(dir, ".git")
+ if stat, err := os.Stat(gitDir); err == nil && stat.IsDir() {
+ return dir
+ }
+
+ // Also check for push-validator directory
+ pvmDir := filepath.Join(dir, "push-validator")
+ if stat, err := os.Stat(pvmDir); err == nil && stat.IsDir() {
+ return dir
+ }
+
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ break // reached root
+ }
+ dir = parent
+ }
+ return ""
+}
+
// getGitCommit returns the repository's current HEAD commit hash, or ""
// when git is unavailable or repoDir is not inside a git work tree.
func getGitCommit(repoDir string) string {
	rev := exec.Command("git", "rev-parse", "HEAD")
	rev.Dir = repoDir
	out, err := rev.Output()
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(out))
}
+
// gitFetch runs `git fetch origin` inside repoDir so remote-tracking
// refs are refreshed before any comparison or pull.
func gitFetch(repoDir string) error {
	fetch := exec.Command("git", "fetch", "origin")
	fetch.Dir = repoDir
	return fetch.Run()
}
+
// gitPull runs `git pull origin <branch>` inside repoDir, streaming
// git's progress output straight to the user's terminal.
func gitPull(repoDir, branch string) error {
	pull := exec.Command("git", "pull", "origin", branch)
	pull.Dir = repoDir
	pull.Stdout = os.Stdout
	pull.Stderr = os.Stderr
	return pull.Run()
}
+
// init registers the hidden `update` command and its flags.
func init() {
	updateCmd.Flags().StringVar(&updateBranch, "branch", "", "Branch or tag to update to (default: feature/pnm)")
	updateCmd.Flags().BoolVar(&updateForce, "force", false, "Force update without confirmation")
	rootCmd.AddCommand(updateCmd)
}
diff --git a/push-validator-manager/cmd/push-validator/cmd_validators.go b/push-validator-manager/cmd/push-validator/cmd_validators.go
new file mode 100644
index 00000000..fab57a14
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_validators.go
@@ -0,0 +1,199 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
+// truncateAddress truncates long addresses while keeping prefix and suffix visible
+func truncateAddress(addr string, maxWidth int) string {
+ if len(addr) <= maxWidth {
+ return addr
+ }
+ if strings.HasPrefix(addr, "pushvaloper") {
+ prefix := addr[:14]
+ suffix := addr[len(addr)-8:]
+ return prefix + "..." + suffix
+ }
+ if strings.HasPrefix(addr, "0x") || strings.HasPrefix(addr, "0X") {
+ prefix := addr[:6]
+ suffix := addr[len(addr)-6:]
+ return prefix + "..." + suffix
+ }
+ return addr
+}
+
// handleValidators prints the current validator set using the default
// (pretty table, non-JSON) rendering.
func handleValidators(cfg config.Config) error {
	return handleValidatorsWithFormat(cfg, false)
}
+
// handleValidatorsWithFormat prints either a pretty table (default)
// or raw JSON (--output=json at root) of the current validator set.
// The set is queried from the genesis node via pchaind, enriched with
// per-validator rewards and EVM addresses fetched concurrently, and
// sorted so the operator's own validator is listed first.
func handleValidatorsWithFormat(cfg config.Config, jsonOut bool) error {
	bin := findPchaind()
	remote := fmt.Sprintf("tcp://%s:26657", cfg.GenesisDomain)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, bin, "query", "staking", "validators", "--node", remote, "-o", "json")
	output, err := cmd.Output()
	if err != nil {
		// Surface a timeout distinctly from other query failures.
		if ctx.Err() == context.DeadlineExceeded {
			return fmt.Errorf("validators: timeout connecting to %s", cfg.GenesisDomain)
		}
		return fmt.Errorf("validators: %w", err)
	}
	if jsonOut {
		// passthrough raw JSON
		fmt.Println(string(output))
		return nil
	}
	// Minimal projection of the staking query response; any fields not
	// declared here are silently ignored by encoding/json.
	var result struct {
		Validators []struct {
			Description struct {
				Moniker string `json:"moniker"`
				Details string `json:"details"`
			} `json:"description"`
			OperatorAddress string `json:"operator_address"`
			Status          string `json:"status"`
			Jailed          bool   `json:"jailed"`
			Tokens          string `json:"tokens"`
			Commission      struct {
				CommissionRates struct {
					Rate          string `json:"rate"`
					MaxRate       string `json:"max_rate"`
					MaxChangeRate string `json:"max_change_rate"`
				} `json:"commission_rates"`
			} `json:"commission"`
		} `json:"validators"`
	}
	if err := json.Unmarshal(output, &result); err != nil {
		// If JSON parse fails, print raw output for diagnostics
		fmt.Println(string(output))
		return nil
	}
	if len(result.Validators) == 0 {
		fmt.Println("No validators found or node not synced")
		return nil
	}

	// Fetch my validator info to highlight in table. Best effort: on
	// failure no row is highlighted and the table still renders.
	myValidatorAddr := ""
	myValCtx, myValCancel := context.WithTimeout(context.Background(), 10*time.Second)
	if myVal, err := validator.GetCachedMyValidator(myValCtx, cfg); err == nil {
		myValidatorAddr = myVal.Address
	}
	myValCancel()

	// validatorDisplay is the row model for the rendered table.
	type validatorDisplay struct {
		moniker        string
		status         string
		statusOrder    int // sort key: BONDED < UNBONDING < UNBONDED < other
		jailed         bool
		tokensPC       float64 // stake scaled from 1e18 base units to PC
		commissionPct  float64
		operatorAddr   string
		cosmosAddr     string
		commissionRwd  string
		outstandingRwd string
		evmAddress     string
		isMyValidator  bool
	}
	vals := make([]validatorDisplay, len(result.Validators))
	var wg sync.WaitGroup

	for i, v := range result.Validators {
		vals[i] = validatorDisplay{moniker: v.Description.Moniker, operatorAddr: v.OperatorAddress, cosmosAddr: v.OperatorAddress, jailed: v.Jailed, isMyValidator: myValidatorAddr != "" && v.OperatorAddress == myValidatorAddr}
		if vals[i].moniker == "" { vals[i].moniker = "unknown" }
		switch v.Status {
		case "BOND_STATUS_BONDED":
			vals[i].status, vals[i].statusOrder = "BONDED", 1
		case "BOND_STATUS_UNBONDING":
			vals[i].status, vals[i].statusOrder = "UNBONDING", 2
		case "BOND_STATUS_UNBONDED":
			vals[i].status, vals[i].statusOrder = "UNBONDED", 3
		default:
			vals[i].status, vals[i].statusOrder = v.Status, 4
		}
		if v.Tokens != "" && v.Tokens != "0" {
			if t, err := strconv.ParseFloat(v.Tokens, 64); err == nil { vals[i].tokensPC = t / 1e18 }
		}
		if v.Commission.CommissionRates.Rate != "" {
			if c, err := strconv.ParseFloat(v.Commission.CommissionRates.Rate, 64); err == nil { vals[i].commissionPct = c * 100 }
		}

		// Fetch rewards and EVM address in parallel using goroutines.
		// Each goroutine writes only its own vals[idx] element, so the
		// WaitGroup is the only synchronization required.
		wg.Add(1)
		go func(idx int, addr string) {
			defer wg.Done()
			// 3 second timeout per validator to avoid blocking
			fetchCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancel()

			vals[idx].commissionRwd, vals[idx].outstandingRwd, _ = validator.GetValidatorRewards(fetchCtx, cfg, addr)
			vals[idx].evmAddress = validator.GetEVMAddress(fetchCtx, addr)
		}(i, v.OperatorAddress)
	}

	wg.Wait()
	sort.Slice(vals, func(i, j int) bool {
		// My validator always comes first
		if vals[i].isMyValidator != vals[j].isMyValidator {
			return vals[i].isMyValidator
		}
		// Then by bonding status, then by stake descending.
		if vals[i].statusOrder != vals[j].statusOrder { return vals[i].statusOrder < vals[j].statusOrder }
		return vals[i].tokensPC > vals[j].tokensPC
	})
	c := ui.NewColorConfig()
	fmt.Println()
	fmt.Println(c.Header(" ๐ฅ Active Push Chain Validators "))
	headers := []string{"VALIDATOR", "COSMOS_ADDR", "STATUS", "STAKE(PC)", "COMM%", "COMM_RWD", "OUTSTND_RWD", "EVM_ADDR"}
	rows := make([][]string, 0, len(vals))
	for _, v := range vals {
		// Check if this is my validator
		moniker := v.moniker
		if v.isMyValidator {
			moniker = moniker + " [My Validator]"
		}

		// Build status string with optional (JAILED) suffix
		statusStr := v.status
		if v.jailed {
			statusStr = statusStr + " (JAILED)"
		}

		row := []string{
			moniker,
			truncateAddress(v.cosmosAddr, 24),
			statusStr,
			fmt.Sprintf("%.1f", v.tokensPC),
			fmt.Sprintf("%.0f%%", v.commissionPct),
			v.commissionRwd,
			v.outstandingRwd,
			truncateAddress(v.evmAddress, 16),
		}

		// Apply green highlighting to the entire row if it's my validator
		if v.isMyValidator {
			for i := range row {
				row[i] = c.Success(row[i])
			}
		}

		rows = append(rows, row)
	}
	fmt.Print(ui.Table(c, headers, rows, nil))
	fmt.Printf("Total Validators: %d\n", len(vals))
	fmt.Println(c.Info("๐ก Tip: Use --output=json for full addresses and raw data"))
	return nil
}
diff --git a/push-validator-manager/cmd/push-validator/cmd_withdraw_rewards.go b/push-validator-manager/cmd/push-validator/cmd_withdraw_rewards.go
new file mode 100644
index 00000000..dd690635
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/cmd_withdraw_rewards.go
@@ -0,0 +1,378 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+ "golang.org/x/term"
+)
+
// handleWithdrawRewards orchestrates the withdraw rewards flow:
// - verify node is synced (local and remote RPC reachable, not catching up)
// - verify validator is registered
// - display current rewards
// - prompt for key name
// - ask about including commission
// - submit withdraw transaction
// - display results
//
// Output honors the global --output flag. In "json" mode the command is
// report-only: it emits the current rewards as a single JSON object and
// returns WITHOUT submitting a transaction, and it never prompts. In text
// mode it prints per-step progress and may prompt interactively unless
// --non-interactive is set.
func handleWithdrawRewards(cfg config.Config) {
	p := ui.NewPrinter(flagOutput)

	// Step 1: Check sync status against both the local node and the remote
	// (genesis-domain) RPC, so we know the node is up and the network head
	// is reachable for comparison.
	if flagOutput != "json" {
		fmt.Println()
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking node sync status..."))
	}

	// Default the local RPC endpoint when not configured.
	local := strings.TrimRight(cfg.RPCLocal, "/")
	if local == "" {
		local = "http://127.0.0.1:26657"
	}
	remoteHTTP := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
	cliLocal := node.New(local)
	cliRemote := node.New(remoteHTTP)

	// Short timeout: this is a liveness probe, not a long query.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	stLocal, err1 := cliLocal.Status(ctx)
	_, err2 := cliRemote.RemoteStatus(ctx, remoteHTTP)
	cancel()

	// Either endpoint failing aborts the whole flow.
	if err1 != nil || err2 != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check sync status"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to check sync status"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Please verify your node is running and properly configured."))
			fmt.Println()
		}
		return
	}

	// Refuse to withdraw while the node is still catching up.
	if stLocal.CatchingUp {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "node is still syncing"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Warning("โ ๏ธ Node is still syncing to latest block"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Please wait for sync to complete before withdrawing rewards."))
			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator sync"))
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Step 2: Check validator registration for this node.
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ Checking validator status..."))
	}

	ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
	myVal, statusErr := validator.GetCachedMyValidator(ctx2, cfg)
	cancel2()

	if statusErr != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to check validator status"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to check validator status"))
			fmt.Println()
		}
		return
	}

	// Only registered validators can withdraw rewards.
	if !myVal.IsValidator {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "node is not registered as validator"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Warning("โ ๏ธ This node is not registered as a validator"))
			fmt.Println()
			fmt.Println(p.Colors.Info("Register first using:"))
			fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator register-validator"))
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Step 3: Display current rewards. A fetch failure here is non-fatal;
	// the withdrawal can still proceed (see below).
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ฐ Fetching current rewards..."))
	}

	ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second)
	commission, outstanding, rewardsErr := validator.GetValidatorRewards(ctx3, cfg, myVal.Address)
	cancel3()

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// JSON mode is report-only: emit the rewards and stop before any
	// prompting or transaction submission.
	if flagOutput == "json" {
		getPrinter().JSON(map[string]any{
			"ok":                  true,
			"commission_rewards":  commission,
			"outstanding_rewards": outstanding,
		})
		return
	}

	// Display rewards summary and validate
	fmt.Println()
	p.Header("Current Rewards")
	if rewardsErr == nil {
		p.KeyValueLine("Commission Rewards", commission+" PC", "green")
		p.KeyValueLine("Outstanding Rewards", outstanding+" PC", "green")
	} else {
		fmt.Println(p.Colors.Warning("โ ๏ธ Could not fetch rewards, but proceeding with withdrawal"))
	}
	fmt.Println()

	// Parse rewards to check if any are available. Values are formatted
	// like "1.234 PC"; strip the unit suffix before parsing. Parse errors
	// are deliberately ignored and yield 0, i.e. "no rewards".
	commissionFloat, _ := strconv.ParseFloat(strings.TrimSpace(strings.TrimSuffix(commission, "PC")), 64)
	outstandingFloat, _ := strconv.ParseFloat(strings.TrimSpace(strings.TrimSuffix(outstanding, "PC")), 64)
	const rewardThreshold = 0.01 // Minimum 0.01 PC to be worthwhile
	hasSignificantRewards := commissionFloat >= rewardThreshold || outstandingFloat >= rewardThreshold

	// Warn if rewards are minimal; only applies when the fetch succeeded,
	// otherwise we cannot judge and proceed regardless.
	if !hasSignificantRewards && rewardsErr == nil {
		fmt.Println(p.Colors.Warning("โ ๏ธ No significant rewards available (less than 0.01 PC)"))
		if !flagNonInteractive {
			// When stdin is not a terminal (e.g. the CLI was launched from
			// a piped installer script), reopen /dev/tty so the prompt can
			// still reach the user. The deferred func restores the original
			// stdin and closes the tty when this function returns.
			savedStdin := os.Stdin
			var tty *os.File
			if !term.IsTerminal(int(savedStdin.Fd())) {
				if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
					tty = t
					os.Stdin = t
				}
			}
			if tty != nil {
				defer func() {
					os.Stdin = savedStdin
					tty.Close()
				}()
			}

			reader := bufio.NewReader(os.Stdin)
			fmt.Print("Continue with withdrawal anyway? (y/N): ")
			input, _ := reader.ReadString('\n')
			input = strings.TrimSpace(strings.ToLower(input))
			// Default is "no": anything other than y/yes cancels.
			if input != "y" && input != "yes" {
				fmt.Println()
				fmt.Println(p.Colors.Info("Withdrawal cancelled."))
				fmt.Println()
				return
			}
			fmt.Println()
		} else {
			// Non-interactive: abort if no rewards
			fmt.Println(p.Colors.Error("โ No significant rewards to withdraw. Aborting."))
			fmt.Println()
			return
		}
	}

	// Step 4: Auto-detect key name from validator. Preference order:
	// key derived from the validator's operator address (via keyring
	// lookup), else the KEY_NAME env var, else the literal "validator-key".
	defaultKeyName := getenvDefault("KEY_NAME", "validator-key")
	var keyName string

	// Try to auto-derive the key name from the validator's address
	if myVal.Address != "" {
		// Convert validator operator address (pushvaloper...) to the
		// matching account address (push...).
		accountAddr, convErr := convertValidatorToAccountAddress(myVal.Address)
		if convErr == nil {
			// Try to find the key in the keyring
			if foundKey, findErr := findKeyNameByAddress(cfg, accountAddr); findErr == nil {
				keyName = foundKey
				if flagOutput != "json" {
					fmt.Println()
					fmt.Printf("๐ Using key: %s\n", keyName)
				}
			} else {
				// Fall back to default if key not found
				keyName = defaultKeyName
			}
		} else {
			// Fall back to default if address conversion failed
			keyName = defaultKeyName
		}
	} else {
		keyName = defaultKeyName
	}

	// Only prompt for a key name when auto-derivation fell back to the
	// default AND the user did not pin KEY_NAME via the environment.
	if flagOutput != "json" && !flagNonInteractive && keyName == defaultKeyName && os.Getenv("KEY_NAME") == "" {
		// Interactive prompt for key name; same /dev/tty fallback as above
		// so piped invocations can still read user input.
		savedStdin := os.Stdin
		var tty *os.File
		if !term.IsTerminal(int(savedStdin.Fd())) {
			if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
				tty = t
				os.Stdin = t
			}
		}
		if tty != nil {
			defer func() {
				os.Stdin = savedStdin
				tty.Close()
			}()
		}

		reader := bufio.NewReader(os.Stdin)
		fmt.Printf("\nEnter key name for withdrawal [%s]: ", defaultKeyName)
		input, _ := reader.ReadString('\n')
		input = strings.TrimSpace(input)
		if input != "" {
			keyName = input
		} else {
			keyName = defaultKeyName
		}
		fmt.Println()
	}

	// Step 5: Check balance for gas fees
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ฐ Checking wallet balance for gas fees..."))
	}

	// Convert validator address to account address for balance check
	accountAddr, addrErr := convertValidatorToAccountAddress(myVal.Address)
	if addrErr != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": "failed to derive account address"})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Failed to derive account address"))
			fmt.Println()
		}
		return
	}

	// Get EVM address for display
	evmAddr, evmErr := getEVMAddress(accountAddr)
	if evmErr != nil {
		evmAddr = "" // Not critical, we can proceed without EVM address
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Wait for sufficient balance (only in interactive mode)
	if flagOutput != "json" && !flagNonInteractive {
		// 0.15 PC expressed in base units (18 decimals); covers observed
		// gas of ~0.1037 PC with ~1.45x headroom.
		const requiredForGasFees = "150000000000000000"
		if !waitForSufficientBalance(cfg, accountAddr, evmAddr, requiredForGasFees, "withdraw") {
			return
		}
	}

	// Step 6: Ask about commission (default: outstanding rewards only).
	var includeCommission bool
	if !flagNonInteractive {
		// Same /dev/tty fallback as the earlier prompts.
		savedStdin := os.Stdin
		var tty *os.File
		if !term.IsTerminal(int(savedStdin.Fd())) {
			if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
				tty = t
				os.Stdin = t
			}
		}
		if tty != nil {
			defer func() {
				os.Stdin = savedStdin
				tty.Close()
			}()
		}

		reader := bufio.NewReader(os.Stdin)
		fmt.Print("Include commission rewards in withdrawal? (y/n) [n]: ")
		input, _ := reader.ReadString('\n')
		input = strings.TrimSpace(strings.ToLower(input))
		includeCommission = input == "y" || input == "yes"
		fmt.Println()
	}

	// Step 7: Submit withdraw rewards transaction
	if flagOutput != "json" {
		fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "๐ค Submitting withdrawal transaction..."))
	}

	v := validator.NewWith(validator.Options{
		BinPath:       findPchaind(),
		HomeDir:       cfg.HomeDir,
		ChainID:       cfg.ChainID,
		Keyring:       cfg.KeyringBackend,
		GenesisDomain: cfg.GenesisDomain,
		Denom:         cfg.Denom,
	})

	// Generous timeout: covers tx broadcast plus inclusion in a block.
	ctx5, cancel5 := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel5()

	txHash, err := v.WithdrawRewards(ctx5, myVal.Address, keyName, includeCommission)
	if err != nil {
		if flagOutput == "json" {
			getPrinter().JSON(map[string]any{"ok": false, "error": err.Error()})
		} else {
			fmt.Println()
			fmt.Println(p.Colors.Error("โ Withdrawal transaction failed"))
			fmt.Println()
			fmt.Printf("Error: %v\n", err)
			fmt.Println()
		}
		return
	}

	if flagOutput != "json" {
		fmt.Println(" " + p.Colors.Success("โ"))
	}

	// Success output
	if flagOutput == "json" {
		getPrinter().JSON(map[string]any{"ok": true, "txhash": txHash})
	} else {
		fmt.Println()
		p.Success("โ
 Rewards successfully withdrawn!")
		fmt.Println()

		// Display transaction hash
		p.KeyValueLine("Transaction Hash", txHash, "green")
		fmt.Println()

		// Show helpful next steps
		fmt.Println(p.Colors.SubHeader("Next Steps"))
		fmt.Println(p.Colors.Separator(40))
		fmt.Println()
		fmt.Println(p.Colors.Info(" 1. Check validator status:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator validators"))
		fmt.Println()
		fmt.Println(p.Colors.Info(" 2. View account balance:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator balance"))
		fmt.Println()
		fmt.Println(p.Colors.Info(" 3. Live dashboard:"))
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator dashboard"))
		fmt.Println()
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " Your rewards have been transferred to your account."))
		fmt.Println()
	}
}
diff --git a/push-validator-manager/cmd/push-validator/helpers.go b/push-validator-manager/cmd/push-validator/helpers.go
new file mode 100644
index 00000000..002938cb
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/helpers.go
@@ -0,0 +1,247 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "golang.org/x/term"
+)
+
+// findPchaind returns the path to the pchaind binary, resolving
+// either PCHAIND or PCHAIN_BIN environment variables, or falling
+// back to the literal "pchaind" on PATH.
+func findPchaind() string {
+ if v := os.Getenv("PCHAIND"); v != "" { return v }
+ if v := os.Getenv("PCHAIN_BIN"); v != "" { return v }
+ return "pchaind"
+}
+
+// getenvDefault returns the environment value for k, or default d
+// when k is not set.
+func getenvDefault(k, d string) string { if v := os.Getenv(k); v != "" { return v }; return d }
+
+// getPrinter returns a UI printer bound to the current --output flag.
+func getPrinter() ui.Printer { return ui.NewPrinter(flagOutput) }
+
+// convertValidatorToAccountAddress converts a validator operator address (pushvaloper...)
+// to its corresponding account address (push...) using pchaind debug addr
+func convertValidatorToAccountAddress(validatorAddress string) (string, error) {
+ bin := findPchaind()
+ cmd := exec.Command(bin, "debug", "addr", validatorAddress)
+ output, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("failed to convert address: %w", err)
+ }
+
+ // Parse the output to find "Bech32 Acc: push1..."
+ // Output format:
+ // Address: [... bytes ...]
+ // Address (hex): 6AD36CEE...
+ // Bech32 Acc: push1dtfkemne22yusl2cn5y6lvewxwfk0a9rcs7rv6
+ // Bech32 Val: pushvaloper1...
+ // Bech32 Con: pushvalcons1...
+ lines := strings.Split(string(output), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "Bech32 Acc:") {
+ parts := strings.Fields(line)
+ if len(parts) >= 3 {
+ return parts[2], nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("could not find Bech32 Acc in debug output")
+}
+
+// getEVMAddress converts a bech32 address (push...) to EVM hex format (0x...)
+// using pchaind debug addr command
+func getEVMAddress(address string) (string, error) {
+ bin := findPchaind()
+ cmd := exec.Command(bin, "debug", "addr", address)
+ output, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("failed to convert address to EVM format: %w", err)
+ }
+
+ // Parse the output to find "Address (hex): ..."
+ lines := strings.Split(string(output), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "Address (hex):") {
+ parts := strings.Fields(line)
+ if len(parts) >= 3 {
+ // Add 0x prefix if not present
+ hexAddr := parts[2]
+ if !strings.HasPrefix(hexAddr, "0x") {
+ hexAddr = "0x" + hexAddr
+ }
+ return hexAddr, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("could not find Address (hex) in debug output")
+}
+
+// hexToBech32Address converts a hex address (0x... or just hex bytes) to bech32 format (push1...)
+// using pchaind debug addr command
+func hexToBech32Address(hexAddr string) (string, error) {
+ // Remove 0x prefix if present
+ if strings.HasPrefix(hexAddr, "0x") || strings.HasPrefix(hexAddr, "0X") {
+ hexAddr = hexAddr[2:]
+ }
+
+ bin := findPchaind()
+ cmd := exec.Command(bin, "debug", "addr", hexAddr)
+ output, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("failed to convert hex address to bech32: %w", err)
+ }
+
+ // Parse the output to find "Bech32 Acc: push1..."
+ lines := strings.Split(string(output), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "Bech32 Acc:") {
+ parts := strings.Fields(line)
+ if len(parts) >= 3 {
+ return parts[2], nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("could not find Bech32 Acc in debug output")
+}
+
+// findKeyNameByAddress finds the key name in the keyring that corresponds to the given address
+func findKeyNameByAddress(cfg config.Config, accountAddress string) (string, error) {
+ bin := findPchaind()
+ cmd := exec.Command(bin, "keys", "list", "--keyring-backend", cfg.KeyringBackend, "--home", cfg.HomeDir, "--output", "json")
+ output, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("failed to list keys: %w", err)
+ }
+
+ // Parse the JSON output to find a key with matching address
+ var keys []struct {
+ Name string `json:"name"`
+ Address string `json:"address"`
+ }
+ if err := json.Unmarshal(output, &keys); err != nil {
+ return "", fmt.Errorf("failed to parse keys: %w", err)
+ }
+
+ // Find matching key
+ for _, key := range keys {
+ if key.Address == accountAddress {
+ return key.Name, nil
+ }
+ }
+
+ return "", fmt.Errorf("no key found for address %s", accountAddress)
+}
+
+// waitForSufficientBalance checks if the account has enough balance to pay gas fees
+// If not, prompts user to fund the wallet and waits for them to press Enter
+// requiredBalance is in micro-units (upc)
+// Returns true if balance is sufficient, false if check failed
+func waitForSufficientBalance(cfg config.Config, accountAddr string, evmAddr string, requiredBalance string, operationName string) bool {
+ p := ui.NewPrinter(flagOutput)
+ v := validator.NewWith(validator.Options{
+ BinPath: findPchaind(),
+ HomeDir: cfg.HomeDir,
+ ChainID: cfg.ChainID,
+ Keyring: cfg.KeyringBackend,
+ GenesisDomain: cfg.GenesisDomain,
+ Denom: cfg.Denom,
+ })
+
+ maxRetries := 10
+ for tries := 0; tries < maxRetries; tries++ {
+ balCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ bal, err := v.Balance(balCtx, accountAddr)
+ cancel()
+
+ if err != nil {
+ fmt.Printf("โ ๏ธ Balance check failed: %v\n", err)
+ tries++
+ time.Sleep(2 * time.Second)
+ continue
+ }
+
+ balInt := new(big.Int)
+ balInt.SetString(bal, 10)
+ reqInt := new(big.Int)
+ reqInt.SetString(requiredBalance, 10)
+
+ if balInt.Cmp(reqInt) >= 0 {
+ fmt.Println(p.Colors.Success("โ
Sufficient balance"))
+ fmt.Println()
+ return true
+ }
+
+ // Convert balance to PC for display (1 PC = 1e18 upc)
+ pcAmount := "0.000000"
+ if bal != "0" {
+ balFloat, _ := new(big.Float).SetString(bal)
+ divisor := new(big.Float).SetFloat64(1e18)
+ result := new(big.Float).Quo(balFloat, divisor)
+ pcAmount = fmt.Sprintf("%.6f", result)
+ }
+
+ // Convert required to PC for display
+ reqFloat, _ := new(big.Float).SetString(requiredBalance)
+ divisor := new(big.Float).SetFloat64(1e18)
+ reqPC := new(big.Float).Quo(reqFloat, divisor)
+ reqPCStr := fmt.Sprintf("%.6f", reqPC)
+
+ // Display funding information with address
+ fmt.Println()
+ p.KeyValueLine("Current Balance", pcAmount+" PC", "yellow")
+ p.KeyValueLine("Required for "+operationName, reqPCStr+" PC", "yellow")
+ fmt.Println()
+ if evmAddr != "" {
+ p.KeyValueLine("Send funds to", evmAddr, "blue")
+ fmt.Println()
+ }
+ fmt.Printf("Please send at least %s to your account for %s.\n\n", p.Colors.Warning(reqPCStr+" PC"), operationName)
+ fmt.Printf("Use faucet at %s for testnet validators\n", p.Colors.Info("https://faucet.push.org"))
+ fmt.Printf("or contact us at %s\n\n", p.Colors.Info("push.org/support"))
+
+ // Wait for user to press Enter
+ if !flagNonInteractive {
+ savedStdin := os.Stdin
+ var tty *os.File
+ if !term.IsTerminal(int(savedStdin.Fd())) {
+ if t, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+ tty = t
+ os.Stdin = t
+ }
+ }
+ if tty != nil {
+ defer func() {
+ os.Stdin = savedStdin
+ tty.Close()
+ }()
+ }
+
+ reader := bufio.NewReader(os.Stdin)
+ fmt.Print(p.Colors.Apply(p.Colors.Theme.Prompt, "Press ENTER after funding..."))
+ _, _ = reader.ReadString('\n')
+ fmt.Println()
+ }
+ }
+
+ // After max retries, give up
+ fmt.Println(p.Colors.Error("โ Unable to verify sufficient balance after multiple attempts"))
+ fmt.Println()
+ return false
+}
diff --git a/push-validator-manager/cmd/push-validator/main.go b/push-validator-manager/cmd/push-validator/main.go
new file mode 100644
index 00000000..6e7a95fd
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/main.go
@@ -0,0 +1,3 @@
+package main
+
+func main() { Execute() }
diff --git a/push-validator-manager/cmd/push-validator/root_cobra.go b/push-validator-manager/cmd/push-validator/root_cobra.go
new file mode 100644
index 00000000..82ab109a
--- /dev/null
+++ b/push-validator-manager/cmd/push-validator/root_cobra.go
@@ -0,0 +1,846 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+ "gopkg.in/yaml.v3"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/bootstrap"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/dashboard"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/exitcodes"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/metrics"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ syncmon "github.com/pushchain/push-chain-node/push-validator-manager/internal/sync"
+ ui "github.com/pushchain/push-chain-node/push-validator-manager/internal/ui"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
// Version information - set via -ldflags during build
// (e.g. go build -ldflags "-X main.Version=... -X main.Commit=... -X main.BuildDate=...").
var (
	Version   = "dev"     // release version; "dev" for local builds
	Commit    = "unknown" // source revision the binary was built from
	BuildDate = "unknown" // timestamp of the build
)
+
// rootCmd wires the CLI surface using Cobra. Persistent flags are
// applied to a loaded config in loadCfg(). Subcommands implement the
// actual operations (init, start/stop, sync, status, etc.).
//
// PersistentPreRun runs before every subcommand, pushing the parsed
// display/interactivity flags into the global UI config. Note: the
// custom help function set in init() runs before this hook, so it
// configures colors itself.
var rootCmd = &cobra.Command{
	Use:   "push-validator",
	Short: "Push Validator",
	Long:  "Manage a Push Chain validator node: init, start, status, sync, and admin tasks.",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		// Initialize global UI config from flags after parsing but before command execution
		ui.InitGlobal(ui.Config{
			NoColor:        flagNoColor,
			NoEmoji:        flagNoEmoji,
			Yes:            flagYes,
			NonInteractive: flagNonInteractive,
			Verbose:        flagVerbose,
			Quiet:          flagQuiet,
			Debug:          flagDebug,
		})
	},
}
+
// Storage for the persistent CLI flags; bound to rootCmd in init().
// When set, these override environment/config defaults via loadCfg().
var (
	flagHome           string // --home: node home directory
	flagBin            string // --bin: path to pchaind binary
	flagRPC            string // --rpc: local RPC base (http[s]://host:port)
	flagGenesis        string // --genesis-domain: genesis RPC domain or URL
	flagOutput         string // --output/-o: json|yaml|text
	flagVerbose        bool   // --verbose: verbose output
	flagQuiet          bool   // --quiet/-q: minimal output
	flagDebug          bool   // --debug/-d: extra diagnostic logs
	flagNoColor        bool   // --no-color: disable ANSI colors
	flagNoEmoji        bool   // --no-emoji: disable emoji output
	flagYes            bool   // --yes/-y: assume yes for all prompts
	flagNonInteractive bool   // --non-interactive: fail instead of prompting
)
+
+func init() {
+ // Persistent flags to override defaults
+ rootCmd.PersistentFlags().StringVar(&flagHome, "home", "", "Node home directory (overrides env)")
+ rootCmd.PersistentFlags().StringVar(&flagBin, "bin", "", "Path to pchaind binary (overrides env)")
+ rootCmd.PersistentFlags().StringVar(&flagRPC, "rpc", "", "Local RPC base (http[s]://host:port)")
+ rootCmd.PersistentFlags().StringVar(&flagGenesis, "genesis-domain", "", "Genesis RPC domain or URL")
+ rootCmd.PersistentFlags().StringVarP(&flagOutput, "output", "o", "text", "Output format: json|yaml|text")
+ rootCmd.PersistentFlags().BoolVar(&flagVerbose, "verbose", false, "Verbose output")
+ rootCmd.PersistentFlags().BoolVarP(&flagQuiet, "quiet", "q", false, "Quiet mode: minimal output (suppresses extras)")
+ rootCmd.PersistentFlags().BoolVarP(&flagDebug, "debug", "d", false, "Debug output: extra diagnostic logs")
+ rootCmd.PersistentFlags().BoolVar(&flagNoColor, "no-color", false, "Disable ANSI colors")
+ rootCmd.PersistentFlags().BoolVar(&flagNoEmoji, "no-emoji", false, "Disable emoji output")
+ rootCmd.PersistentFlags().BoolVarP(&flagYes, "yes", "y", false, "Assume yes for all prompts")
+ rootCmd.PersistentFlags().BoolVar(&flagNonInteractive, "non-interactive", false, "Fail instead of prompting")
+
+ // Replace root help to present grouped, example-rich output.
+ rootCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
+ // Help runs before PersistentPreRun, so manually configure colors
+ c := ui.NewColorConfig()
+ c.Enabled = c.Enabled && !flagNoColor
+ c.EmojiEnabled = c.EmojiEnabled && !flagNoEmoji
+ w := os.Stdout
+
+ // Header
+ fmt.Fprintln(w, c.Header(" Push Validator "))
+ fmt.Fprintln(w, c.Description("Manage a Push Chain validator node: init, start, status, sync, and admin tasks."))
+ fmt.Fprintln(w, c.Separator(50))
+ fmt.Fprintln(w)
+
+ // Usage
+ fmt.Fprintln(w, c.SubHeader("USAGE"))
+ fmt.Fprintf(w, " %s [flags]\n", "push-validator")
+ fmt.Fprintln(w)
+
+ // Quick Start
+ fmt.Fprintln(w, c.SubHeader("Quick Start"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator start", "Start the node process"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator status", "Show node/rpc/sync status"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator dashboard", "Live dashboard with metrics"))
+ fmt.Fprintln(w)
+
+ // Operations
+ fmt.Fprintln(w, c.SubHeader("Operations"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator stop", "Stop the node process"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator restart", "Restart the node process"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator logs", "Tail node logs"))
+ fmt.Fprintln(w)
+
+ // Validator
+ fmt.Fprintln(w, c.SubHeader("Validator"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator validators", "List validators (default pretty, --output json)"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator balance [address]", "Check account balance (defaults to KEY_NAME)"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator register-validator", "Register this node as a validator"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator increase-stake", "Increase validator stake"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator unjail", "Restore jailed validator to active status"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator withdraw-rewards", "Withdraw validator rewards and commission"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator restake", "Withdraw and restake all rewards"))
+ fmt.Fprintln(w)
+
+ // Maintenance
+ fmt.Fprintln(w, c.SubHeader("Maintenance"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator backup", "Create config/state backup archive"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator reset", "Reset chain data (keeps addr book)"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator full-reset", "โ ๏ธ Complete reset (deletes ALL keys and data)"))
+ fmt.Fprintln(w)
+
+ // Utilities
+ fmt.Fprintln(w, c.SubHeader("Utilities"))
+ fmt.Fprintln(w, c.FormatCommand("push-validator doctor", "Run diagnostic checks"))
+ fmt.Fprintln(w)
+ })
+
+ // status command (uses root --output)
+ var statusStrict bool
+ statusCmd := &cobra.Command{
+ Use: "status",
+ Short: "Show node status",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cfg := loadCfg()
+ sup := process.New(cfg.HomeDir)
+ res := computeStatus(cfg, sup)
+
+ // Strict mode: exit non-zero if issues detected
+ if statusStrict && (res.Error != "" || !res.Running || res.CatchingUp || res.Peers == 0) {
+ // Still output the status before exiting
+ switch flagOutput {
+ case "json":
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ _ = enc.Encode(res)
+ case "yaml":
+ data, _ := yaml.Marshal(res)
+ fmt.Println(string(data))
+ case "text", "":
+ if !flagQuiet {
+ printStatusText(res)
+ }
+ }
+ return exitcodes.ValidationErr("node has issues")
+ }
+
+ switch flagOutput {
+ case "json":
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ return enc.Encode(res)
+ case "yaml":
+ data, err := yaml.Marshal(res)
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(data))
+ return nil
+ case "text", "":
+ if flagQuiet {
+ fmt.Printf("running=%v rpc=%v catching_up=%v height=%d\n", res.Running, res.RPCListening, res.CatchingUp, res.Height)
+ } else {
+ printStatusText(res)
+ }
+ return nil
+ default:
+ return fmt.Errorf("invalid --output: %s (use json|yaml|text)", flagOutput)
+ }
+ },
+ }
+ statusCmd.Flags().BoolVar(&statusStrict, "strict", false, "Exit non-zero if node has issues (not running, catching up, no peers, or errors)")
+ rootCmd.AddCommand(statusCmd)
+
+ // dashboard - interactive TUI for monitoring
+ rootCmd.AddCommand(createDashboardCmd())
+
+ // init (Cobra flags)
+ var initMoniker, initChainID, initSnapshotRPC string
+ initCmd := &cobra.Command{
+ Use: "init",
+ Short: "Initialize local node home",
+ Hidden: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cfg := loadCfg()
+ p := getPrinter()
+ if initMoniker == "" {
+ initMoniker = getenvDefault("MONIKER", "push-validator")
+ }
+ if initChainID == "" {
+ initChainID = cfg.ChainID
+ }
+ if initSnapshotRPC == "" {
+ initSnapshotRPC = cfg.SnapshotRPC
+ }
+
+ // Create progress callback that shows init steps
+ progressCallback := func(msg string) {
+ if flagOutput != "json" {
+ fmt.Printf(" โ %s\n", msg)
+ }
+ }
+
+ svc := bootstrap.New()
+ if err := svc.Init(cmd.Context(), bootstrap.Options{
+ HomeDir: cfg.HomeDir,
+ ChainID: initChainID,
+ Moniker: initMoniker,
+ GenesisDomain: cfg.GenesisDomain,
+ BinPath: findPchaind(),
+ SnapshotRPCPrimary: initSnapshotRPC,
+ SnapshotRPCSecondary: "https://rpc-testnet-donut-node1.push.org",
+ Progress: progressCallback,
+ }); err != nil {
+ ui.PrintError(ui.ErrorMessage{
+ Problem: "Initialization failed",
+ Causes: []string{
+ "Network issue fetching genesis or status",
+ "Incorrect --genesis-domain or RPC unreachable",
+ "pchaind binary missing or not executable",
+ },
+ Actions: []string{
+ "Verify connectivity: curl https:///status",
+ "Set --genesis-domain to a working RPC host",
+ "Ensure pchaind is installed and in PATH or pass --bin",
+ },
+ Hints: []string{"push-validator validators --output json"},
+ })
+ return err
+ }
+ if flagOutput != "json" {
+ p.Success("โ Initialization complete")
+ }
+ return nil
+ },
+ }
+ initCmd.Flags().StringVar(&initMoniker, "moniker", "", "Validator moniker")
+ initCmd.Flags().StringVar(&initChainID, "chain-id", "", "Chain ID")
+ initCmd.Flags().StringVar(&initSnapshotRPC, "snapshot-rpc", "", "Snapshot RPC base URL")
+ rootCmd.AddCommand(initCmd)
+
+ // start (Cobra flags)
+ var startBin string
+ var startNoPrompt bool
+ startCmd := &cobra.Command{
+ Use: "start",
+ Short: "Start node",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cfg := loadCfg()
+ p := getPrinter()
+
+ // Check if initialization is needed (genesis.json or validator keys missing)
+ genesisPath := filepath.Join(cfg.HomeDir, "config", "genesis.json")
+ privValKeyPath := filepath.Join(cfg.HomeDir, "config", "priv_validator_key.json")
+ nodeKeyPath := filepath.Join(cfg.HomeDir, "config", "node_key.json")
+
+ // Initialize if genesis OR validator keys are missing
+ // (needed for first-time setup and post-full-reset scenarios)
+ needsInit := false
+ if _, err := os.Stat(genesisPath); os.IsNotExist(err) {
+ needsInit = true
+ }
+ if _, err := os.Stat(privValKeyPath); os.IsNotExist(err) {
+ needsInit = true
+ }
+ if _, err := os.Stat(nodeKeyPath); os.IsNotExist(err) {
+ needsInit = true
+ }
+
+ if needsInit {
+ // Auto-initialize on first start
+ if flagOutput != "json" {
+ p.Info("Initializing node (first time)...")
+ fmt.Println()
+ }
+
+ // Create progress callback that shows init steps
+ progressCallback := func(msg string) {
+ if flagOutput != "json" {
+ fmt.Printf(" โ %s\n", msg)
+ }
+ }
+
+ svc := bootstrap.New()
+ if err := svc.Init(cmd.Context(), bootstrap.Options{
+ HomeDir: cfg.HomeDir,
+ ChainID: cfg.ChainID,
+ Moniker: getenvDefault("MONIKER", "push-validator"),
+ GenesisDomain: cfg.GenesisDomain,
+ BinPath: findPchaind(),
+ SnapshotRPCPrimary: cfg.SnapshotRPC,
+ SnapshotRPCSecondary: "https://rpc-testnet-donut-node1.push.org",
+ Progress: progressCallback,
+ }); err != nil {
+ ui.PrintError(ui.ErrorMessage{
+ Problem: "Initialization failed",
+ Causes: []string{
+ "Network issue fetching genesis or status",
+ "Incorrect genesis domain configuration",
+ "pchaind binary missing or not executable",
+ },
+ Actions: []string{
+ "Verify connectivity: curl https:///status",
+ "Check genesis domain in config",
+ "Ensure pchaind is installed and in PATH",
+ },
+ })
+ return err
+ }
+
+ if flagOutput != "json" {
+ fmt.Println()
+ p.Success("โ Initialization complete")
+ }
+ }
+
+	// Check whether the node process is already running.
+	sup := process.New(cfg.HomeDir)
+	isAlreadyRunning := sup.IsRunning()
+
+	if flagOutput != "json" {
+		if isAlreadyRunning {
+			if pid, ok := sup.PID(); ok {
+				p.Success(fmt.Sprintf("โ Node is running (PID: %d)", pid))
+			} else {
+				p.Success("โ Node is running")
+			}
+		} else {
+			p.Info("Starting node...")
+		}
+	}
+
+	// Continue with normal start.
+	if startBin != "" {
+		os.Setenv("PCHAIND", startBin)
+	}
+	_, err := sup.Start(process.StartOpts{HomeDir: cfg.HomeDir, Moniker: os.Getenv("MONIKER"), BinPath: findPchaind()})
+	if err != nil {
+		ui.PrintError(ui.ErrorMessage{
+			Problem: "Failed to start node",
+			Causes: []string{
+				"Invalid home directory or permissions",
+				"pchaind not found or incompatible",
+				"Port already in use",
+			},
+			Actions: []string{
+				// BUG FIX: this previously rendered as "ls /config/genesis.json"
+				// (missing home directory); interpolate the configured home dir.
+				fmt.Sprintf("Check: ls %s", filepath.Join(cfg.HomeDir, "config", "genesis.json")),
+				"Confirm pchaind version matches network",
+				"Verify ports 26656/26657 are available",
+			},
+		})
+		return err
+	}
+	if flagOutput == "json" {
+		p.JSON(map[string]any{"ok": true, "action": "start", "already_running": isAlreadyRunning})
+	} else {
+		if !isAlreadyRunning {
+			p.Success("โ Node started successfully")
+		}
+
+		// Check validator status and show next steps (skipped with --no-prompt).
+		if !startNoPrompt {
+			fmt.Println()
+			if !handlePostStartFlow(cfg, &p) {
+				// Post-start flow failures are non-fatal: the node is already started.
+				return nil
+			}
+		}
+	}
+	return nil
+ },
+ }
+ startCmd.Flags().StringVar(&startBin, "bin", "", "Path to pchaind binary")
+ startCmd.Flags().BoolVar(&startNoPrompt, "no-prompt", false, "Skip post-start prompts (for use in scripts)")
+ rootCmd.AddCommand(startCmd)
+
+ rootCmd.AddCommand(&cobra.Command{Use: "stop", Short: "Stop node", RunE: func(cmd *cobra.Command, args []string) error { return handleStop(process.New(loadCfg().HomeDir)) }})
+
+ var restartBin string
+ restartCmd := &cobra.Command{Use: "restart", Short: "Restart node", RunE: func(cmd *cobra.Command, args []string) error {
+ cfg := loadCfg()
+ p := getPrinter()
+ if restartBin != "" {
+ os.Setenv("PCHAIND", restartBin)
+ }
+ _, err := process.New(cfg.HomeDir).Restart(process.StartOpts{HomeDir: cfg.HomeDir, Moniker: os.Getenv("MONIKER"), BinPath: findPchaind()})
+ if err != nil {
+ ui.PrintError(ui.ErrorMessage{
+ Problem: "Failed to restart node",
+ Causes: []string{
+ "Process could not be stopped cleanly",
+ "Start preconditions failed (see start command)",
+ },
+ Actions: []string{
+ "Check logs: push-validator logs",
+ "Try: push-validator stop; then start",
+ },
+ })
+ return err
+ }
+ if flagOutput == "json" {
+ p.JSON(map[string]any{"ok": true, "action": "restart"})
+ } else {
+ p.Success("โ Node restarted")
+ fmt.Println()
+ fmt.Println(p.Colors.Info("Useful commands:"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator status"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (check sync progress)"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator dashboard"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (live dashboard)"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Command, " push-validator logs"))
+ fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " (view logs)"))
+ }
+ return nil
+ }}
+ restartCmd.Flags().StringVar(&restartBin, "bin", "", "Path to pchaind binary")
+ rootCmd.AddCommand(restartCmd)
+
+ rootCmd.AddCommand(&cobra.Command{Use: "logs", Short: "Tail node logs", RunE: func(cmd *cobra.Command, args []string) error { return handleLogs(process.New(loadCfg().HomeDir)) }})
+
+ rootCmd.AddCommand(&cobra.Command{Use: "reset", Short: "Reset chain data", RunE: func(cmd *cobra.Command, args []string) error {
+ return handleReset(loadCfg(), process.New(loadCfg().HomeDir))
+ }})
+ rootCmd.AddCommand(&cobra.Command{Use: "full-reset", Short: "Complete reset (deletes all keys and data)", RunE: func(cmd *cobra.Command, args []string) error {
+ return handleFullReset(loadCfg(), process.New(loadCfg().HomeDir))
+ }})
+ rootCmd.AddCommand(&cobra.Command{Use: "backup", Short: "Backup config and validator state", RunE: func(cmd *cobra.Command, args []string) error { return handleBackup(loadCfg()) }})
+ validatorsCmd := &cobra.Command{Use: "validators", Short: "List validators", RunE: func(cmd *cobra.Command, args []string) error {
+ return handleValidatorsWithFormat(loadCfg(), flagOutput == "json")
+ }}
+ rootCmd.AddCommand(validatorsCmd)
+ var balAddr string
+ balanceCmd := &cobra.Command{Use: "balance [address]", Short: "Show balance", Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error {
+ if balAddr != "" {
+ args = []string{balAddr}
+ }
+ return handleBalance(loadCfg(), args)
+ }}
+ balanceCmd.Flags().StringVar(&balAddr, "address", "", "Account address")
+ rootCmd.AddCommand(balanceCmd)
+ // register-validator: interactive flow with optional flag overrides
+ regCmd := &cobra.Command{Use: "register-validator", Short: "Register this node as validator", RunE: func(cmd *cobra.Command, args []string) error {
+ cfg := loadCfg()
+ handleRegisterValidator(cfg)
+ return nil
+ }}
+ regCmd.Flags().BoolVar(&flagRegisterCheckOnly, "check-only", false, "Exit after reporting validator registration status")
+ rootCmd.AddCommand(regCmd)
+
+ // unjail command
+ unjailCmd := &cobra.Command{
+ Use: "unjail",
+ Short: "Restore jailed validator to active status",
+ Long: "Unjail a validator that was temporarily jailed for downtime, restoring it to the active validator set",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ handleUnjail(loadCfg())
+ return nil
+ },
+ }
+ rootCmd.AddCommand(unjailCmd)
+
+ // withdraw-rewards command
+ withdrawRewardsCmd := &cobra.Command{
+ Use: "withdraw-rewards",
+ Aliases: []string{"withdraw", "claim-rewards"},
+ Short: "Withdraw validator rewards and commission",
+ Long: "Withdraw accumulated delegation rewards and optionally withdraw validator commission",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ handleWithdrawRewards(loadCfg())
+ return nil
+ },
+ }
+ rootCmd.AddCommand(withdrawRewardsCmd)
+
+ // increase-stake command
+ increaseStakeCmd := &cobra.Command{
+ Use: "increase-stake",
+ Short: "Increase validator stake",
+ Long: "Delegate additional tokens to increase your validator's stake and voting power",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ handleIncreaseStake(loadCfg())
+ return nil
+ },
+ }
+ rootCmd.AddCommand(increaseStakeCmd)
+
+ // restake command
+ restakeAllCmd := &cobra.Command{
+ Use: "restake",
+ Short: "Withdraw all rewards and restake them",
+ Long: "Automatically withdraw all rewards (commission and outstanding) and restake them to increase validator power",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ handleRestakeAll(loadCfg())
+ return nil
+ },
+ }
+ rootCmd.AddCommand(restakeAllCmd)
+
+ // completion and version
+ rootCmd.AddCommand(&cobra.Command{Use: "completion [bash|zsh|fish|powershell]", Short: "Generate shell completion", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error {
+ switch args[0] {
+ case "bash":
+ return rootCmd.GenBashCompletion(os.Stdout)
+ case "zsh":
+ return rootCmd.GenZshCompletion(os.Stdout)
+ case "fish":
+ return rootCmd.GenFishCompletion(os.Stdout, true)
+ case "powershell":
+ return rootCmd.GenPowerShellCompletionWithDesc(os.Stdout)
+ default:
+ return fmt.Errorf("unknown shell: %s", args[0])
+ }
+ }})
+ // version command with semantic versioning
+ versionCmd := &cobra.Command{
+ Use: "version",
+ Short: "Show version",
+ Run: func(cmd *cobra.Command, args []string) {
+ switch flagOutput {
+ case "json":
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", " ")
+ enc.Encode(map[string]string{
+ "version": Version,
+ "commit": Commit,
+ "build_date": BuildDate,
+ })
+ case "yaml":
+ data, _ := yaml.Marshal(map[string]string{
+ "version": Version,
+ "commit": Commit,
+ "build_date": BuildDate,
+ })
+ fmt.Println(string(data))
+ default:
+ fmt.Printf("push-validator %s (%s) built %s\n", Version, Commit, BuildDate)
+ }
+ },
+ }
+ rootCmd.AddCommand(versionCmd)
+}
+
+// Execute runs the root command. On failure it prints the error to
+// stderr and terminates the process with a command-specific exit code.
+func Execute() {
+	err := rootCmd.Execute()
+	if err == nil {
+		return
+	}
+	fmt.Fprintln(os.Stderr, err)
+	os.Exit(exitcodes.CodeForError(err))
+}
+
+// loadCfg reads defaults + env via internal/config.Load() and then
+// layers on any overrides supplied through the persistent CLI flags
+// (home directory, local RPC endpoint, genesis domain, binary path).
+func loadCfg() config.Config {
+	cfg := config.Load()
+
+	// Flag overrides take precedence over env/defaults.
+	if flagHome != "" {
+		cfg.HomeDir = flagHome
+	}
+	if flagRPC != "" {
+		cfg.RPCLocal = flagRPC
+	}
+	if flagGenesis != "" {
+		cfg.GenesisDomain = flagGenesis
+	}
+	// The binary path override is communicated through the PCHAIND
+	// environment variable rather than a config field.
+	if flagBin != "" {
+		os.Setenv("PCHAIND", flagBin)
+	}
+
+	return cfg
+}
+
+// handlePostStartFlow manages the post-start flow based on validator status:
+// it waits for the node to finish syncing, checks whether this node is
+// already a registered validator, and (interactively) offers registration
+// when it is not. A dashboard prompt is always shown at the end.
+// Returns false if a non-fatal error occurred, true if the flow completed.
+func handlePostStartFlow(cfg config.Config, p *ui.Printer) bool {
+	// First, check if the node is still syncing using comprehensive sync check
+	// (same logic as dashboard/status to ensure accuracy).
+	fmt.Println(p.Colors.Info("โธ Checking Sync Status"))
+
+	collector := metrics.NewWithoutCPU()
+	syncCtx, syncCancel := context.WithTimeout(context.Background(), 5*time.Second)
+	snap := collector.Collect(syncCtx, "http://127.0.0.1:26657", cfg.GenesisDomain)
+	syncCancel()
+
+	// Consider synced only if:
+	// 1. CatchingUp is false AND
+	// 2. Local height is within syncTolerance blocks of the remote height
+	//    (or the remote height is unavailable).
+	const syncTolerance = 5
+	isSyncing := snap.Chain.CatchingUp ||
+		(snap.Chain.RemoteHeight > 0 && snap.Chain.LocalHeight < snap.Chain.RemoteHeight-syncTolerance)
+
+	// DEBUG: log the raw sync inputs when verbose output is requested.
+	if flagVerbose {
+		fmt.Printf("[DEBUG] Sync Check: CatchingUp=%v, LocalHeight=%d, RemoteHeight=%d, IsSyncing=%v\n",
+			snap.Chain.CatchingUp, snap.Chain.LocalHeight, snap.Chain.RemoteHeight, isSyncing)
+	}
+
+	if isSyncing {
+		// Node is still syncing - wait for sync to complete before validator checks.
+		fmt.Println(p.Colors.Info(" โธ Node is syncing with the network..."))
+		fmt.Println(p.Colors.Apply(p.Colors.Theme.Description, " Waiting for sync to complete...\n"))
+		fmt.Println(p.Colors.Info("โธ Monitoring Sync Progress"))
+
+		// Wait for sync to complete using the sync monitor.
+		sup := process.New(cfg.HomeDir)
+		remoteURL := "https://" + strings.TrimSuffix(cfg.GenesisDomain, "/") + ":443"
+
+		if err := syncmon.Run(context.Background(), syncmon.Options{
+			LocalRPC:     "http://127.0.0.1:26657",
+			RemoteRPC:    remoteURL,
+			LogPath:      sup.LogPath(),
+			Window:       30,
+			Compact:      false,
+			Out:          os.Stdout,
+			Interval:     120 * time.Millisecond,
+			Quiet:        flagQuiet,
+			Debug:        flagDebug,
+			StuckTimeout: 2 * time.Minute,
+		}); err != nil {
+			// Sync failed or stuck - show warning and dashboard.
+			fmt.Println()
+			fmt.Println(p.Colors.Warning(" โ Sync monitoring error (will retry in dashboard)"))
+			showDashboardPrompt(cfg, p)
+			return false
+		}
+
+		// Sync complete - fall through to validator checks.
+		fmt.Println()
+	} else {
+		// Node is already synced - show success message.
+		fmt.Println(p.Colors.Success(" โ Node is synced"))
+	}
+
+	// Node is synced - proceed with validator checks.
+	v := validator.NewWith(validator.Options{
+		BinPath:       findPchaind(),
+		HomeDir:       cfg.HomeDir,
+		ChainID:       cfg.ChainID,
+		Keyring:       cfg.KeyringBackend,
+		GenesisDomain: cfg.GenesisDomain,
+		Denom:         cfg.Denom,
+	})
+
+	fmt.Println(p.Colors.Info("โธ Checking Validator Status"))
+
+	statusCtx, statusCancel := context.WithTimeout(context.Background(), 10*time.Second)
+	isValidator, err := v.IsValidator(statusCtx, "")
+	statusCancel()
+
+	if err != nil {
+		// If we can't check status, show warning but continue to dashboard.
+		fmt.Println(p.Colors.Warning(" โ Could not verify validator status (will retry in dashboard)"))
+		showDashboardPrompt(cfg, p)
+		return false
+	}
+
+	if isValidator {
+		// Already a validator - show success and dashboard.
+		fmt.Println(p.Colors.Success(" โ Registered as validator"))
+		showDashboardPrompt(cfg, p)
+		return true
+	}
+
+	// Not a validator - show registration prompt.
+	fmt.Println(p.Colors.Warning(" โ Not registered as validator"))
+	fmt.Println()
+
+	if !isTerminalInteractive() {
+		// Non-interactive (scripts/CI) - print the manual steps and move on.
+		printRegisterSteps()
+		showDashboardPrompt(cfg, p)
+		return true
+	}
+
+	// Interactive prompt - prefer /dev/tty to avoid buffering os.Stdin,
+	// keeping stdin clean for subsequent log UI raw mode.
+	fmt.Print("Register as validator now? (y/N) ")
+	line, ok := promptReadLine()
+	if !ok {
+		// Error reading input - show dashboard.
+		fmt.Println()
+		showDashboardPrompt(cfg, p)
+		return false
+	}
+	response := strings.ToLower(strings.TrimSpace(line))
+
+	if response == "y" || response == "yes" {
+		// User wants to register now.
+		fmt.Println()
+		handleRegisterValidator(cfg)
+		fmt.Println()
+	} else {
+		// User declined - show them the steps to do it manually.
+		fmt.Println()
+		printRegisterSteps()
+	}
+
+	// Always show dashboard at the end.
+	showDashboardPrompt(cfg, p)
+	return true
+}
+
+// printRegisterSteps prints the manual validator-registration checklist
+// followed by a blank line (shared by the interactive and non-interactive
+// branches of handlePostStartFlow).
+func printRegisterSteps() {
+	fmt.Println("Next steps to register as validator:")
+	fmt.Println("1. Get test tokens: https://faucet.push.org")
+	fmt.Println("2. Check balance: push-validator balance")
+	fmt.Println("3. Register: push-validator register-validator")
+	fmt.Println()
+}
+
+// promptReadLine reads one line of user input, preferring /dev/tty over
+// os.Stdin so stdin stays clean for subsequent raw-mode UIs. ok is false
+// when no line could be read (e.g. Ctrl+C or closed input).
+func promptReadLine() (line string, ok bool) {
+	if tty, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+		defer tty.Close()
+		s, readErr := bufio.NewReader(tty).ReadString('\n')
+		if readErr != nil {
+			return "", false
+		}
+		return s, true
+	}
+	// Fallback to stdin if /dev/tty is unavailable.
+	s, readErr := bufio.NewReader(os.Stdin).ReadString('\n')
+	if readErr != nil {
+		return "", false
+	}
+	return s, true
+}
+
+// handleDashboard launches the interactive dashboard with the standard
+// refresh/timeout settings and the global display flags.
+func handleDashboard(cfg config.Config) error {
+	return runDashboardInteractive(dashboard.Options{
+		Config:          cfg,
+		RefreshInterval: 3 * time.Second,
+		RPCTimeout:      5 * time.Second,
+		NoColor:         flagNoColor,
+		NoEmoji:         flagNoEmoji,
+		Debug:           false,
+	})
+}
+
+// showDashboardPrompt displays a prompt asking the user to press ENTER to
+// launch the dashboard. It always shows the prompt, but handles timeouts
+// gracefully in non-interactive environments (scripts/CI). Follows the
+// install flow pattern with clear before/after messages.
+func showDashboardPrompt(cfg config.Config, p *ui.Printer) {
+	fmt.Println()
+	fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
+	fmt.Println("โ DASHBOARD AVAILABLE โ")
+	fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
+	fmt.Println()
+	fmt.Println(" The node is running in the background.")
+	fmt.Println(" Press ENTER to open the interactive dashboard (or Ctrl+C to skip)")
+	fmt.Println(" Note: The node will continue running in the background.")
+	fmt.Println()
+	fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
+	fmt.Print("Press ENTER to continue to the dashboard... ")
+
+	// launchDashboard opens the dashboard and, once the user exits it,
+	// prints the shared "dashboard closed" banner (deduplicated from the
+	// tty and stdin branches below).
+	launchDashboard := func() {
+		fmt.Println()
+		_ = handleDashboard(cfg)
+
+		fmt.Println()
+		fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
+		fmt.Println(p.Colors.Success("โ Dashboard closed. Node is still running in background."))
+		fmt.Println("โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
+		fmt.Println()
+	}
+
+	// Try /dev/tty first (best for interactive terminals).
+	if ttyFile, err := os.OpenFile("/dev/tty", os.O_RDONLY, 0); err == nil {
+		// TTY available - wait for ENTER indefinitely (user is present).
+		_, readErr := bufio.NewReader(ttyFile).ReadString('\n')
+		ttyFile.Close()
+		if readErr != nil {
+			// Ctrl+C or read error - skip the dashboard.
+			fmt.Println()
+			fmt.Println(" Dashboard skipped. Node is running in background.")
+			fmt.Println()
+			return
+		}
+		launchDashboard()
+		return
+	}
+
+	// No TTY available - try stdin with a timeout (scripts/CI/non-interactive).
+	// NOTE(review): if the timeout fires, the reader goroutine below stays
+	// blocked on stdin for the rest of the process; harmless for a
+	// short-lived CLI, but worth confirming.
+	done := make(chan bool, 1)
+	go func() {
+		_, _ = bufio.NewReader(os.Stdin).ReadString('\n')
+		done <- true
+	}()
+
+	select {
+	case <-done:
+		// Got input - launch dashboard.
+		launchDashboard()
+	case <-time.After(2 * time.Second):
+		// Timeout - likely script/CI with no input available.
+		fmt.Println()
+		fmt.Println(" Dashboard is available - run: push-validator dashboard")
+		fmt.Println()
+	}
+}
+
+// isTerminalInteractive reports whether both stdin and stdout are attached
+// to a terminal, i.e. it is safe to prompt the user for input.
+func isTerminalInteractive() bool {
+	stdinIsTTY := term.IsTerminal(int(os.Stdin.Fd()))
+	stdoutIsTTY := term.IsTerminal(int(os.Stdout.Fd()))
+	return stdinIsTTY && stdoutIsTTY
+}
diff --git a/push-validator-manager/go.mod b/push-validator-manager/go.mod
new file mode 100644
index 00000000..a863d715
--- /dev/null
+++ b/push-validator-manager/go.mod
@@ -0,0 +1,51 @@
+module github.com/pushchain/push-chain-node/push-validator-manager
+
+go 1.23.0
+
+toolchain go1.23.11
+
+require (
+ github.com/cespare/xxhash/v2 v2.3.0
+ github.com/charmbracelet/bubbles v0.21.0
+ github.com/charmbracelet/bubbletea v1.3.4
+ github.com/charmbracelet/lipgloss v1.1.0
+ github.com/gorilla/websocket v1.5.1
+ github.com/nxadm/tail v1.4.11
+ github.com/shirou/gopsutil/v3 v3.24.5
+ github.com/spf13/cobra v1.8.0
+ golang.org/x/term v0.13.0
+ gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+ github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+ github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
+ github.com/charmbracelet/x/ansi v0.8.0 // indirect
+ github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
+ github.com/charmbracelet/x/term v0.2.1 // indirect
+ github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+ github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-localereader v0.0.1 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
+ github.com/muesli/cancelreader v0.2.2 // indirect
+ github.com/muesli/termenv v0.16.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/shoenig/go-m1cpu v0.1.6 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+ github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ golang.org/x/net v0.17.0 // indirect
+ golang.org/x/sync v0.11.0 // indirect
+ golang.org/x/sys v0.30.0 // indirect
+ golang.org/x/text v0.13.0 // indirect
+ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+)
diff --git a/push-validator-manager/go.sum b/push-validator-manager/go.sum
new file mode 100644
index 00000000..9496abf2
--- /dev/null
+++ b/push-validator-manager/go.sum
@@ -0,0 +1,110 @@
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
+github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
+github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
+github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
+github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
+github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo=
+github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
+github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
+github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
+github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
+github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
+github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
+github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
+github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
+github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
+github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
+github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
+github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
+github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
+github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
+golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/push-validator-manager/install.sh b/push-validator-manager/install.sh
new file mode 100644
index 00000000..22a821ce
--- /dev/null
+++ b/push-validator-manager/install.sh
@@ -0,0 +1,1237 @@
+#!/usr/bin/env bash
+# Push Validator Manager (Go) โ Installer with local/clone build + guided start
+# Examples:
+# bash install.sh # default: reset data, build if needed, init+start, wait for sync
+# bash install.sh --no-reset --no-start # install only
+# bash install.sh --use-local # use current repo checkout to build
+# PNM_REF=feature/pnm bash install.sh # clone specific ref (branch/tag)
+
+# Fail fast: abort on errors, unset variables, and failed pipeline stages.
+set -euo pipefail
+# Restrict word splitting to newline/tab so unquoted expansions are safer.
+IFS=$'\n\t'
+
+# Styling and output functions
+CYAN='\033[0;36m'; GREEN='\033[0;32m'; YELLOW='\033[0;33m'; RED='\033[0;31m'; BOLD='\033[1m'; DIM='\033[2m'; NC='\033[0m'
+NO_COLOR="${NO_COLOR:-}"
+VERBOSE="${VERBOSE:-no}"
+
+# Disable colors if NO_COLOR is set or not a terminal
+if [[ -n "$NO_COLOR" ]] || [[ ! -t 1 ]]; then
+ CYAN=''; GREEN=''; YELLOW=''; RED=''; BOLD=''; DIM=''; NC=''
+fi
+
+status() { echo -e "${CYAN}$*${NC}"; }
+ok() {
+ if [[ $PHASE_START_TIME -gt 0 ]]; then
+ local delta=$(($(date +%s) - PHASE_START_TIME))
+ local unit="s"
+ local time_val=$delta
+ # Show milliseconds for sub-second times
+ if [[ $delta -eq 0 ]]; then
+ time_val="<1"
+ unit="s"
+ fi
+ echo -e " ${GREEN}โ $* (${time_val}${unit})${NC}"
+ else
+ echo -e " ${GREEN}โ $*${NC}"
+ fi
+}
+warn() { echo -e " ${YELLOW}โ $*${NC}"; }
+err() { echo -e " ${RED}โ $*${NC}"; }
+phase() { echo -e "\n${BOLD}${CYAN}โธ $*${NC}"; }
+step() { echo -e " ${DIM}โ${NC} $*"; }
+verbose() { [[ "$VERBOSE" = "yes" ]] && echo -e " ${DIM}$*${NC}" || true; }
+
+# Helper: Indent output lines (adds 2-space prefix)
+indent_output() {
+ while IFS= read -r line; do
+ if [[ -n "$line" ]]; then
+ echo " $line"
+ else
+ echo ""
+ fi
+ done
+}
+
+# Helper: locate a timeout utility. Prints "timeout" (GNU coreutils),
+# "gtimeout" (macOS Homebrew coreutils), or an empty string when neither
+# is available so callers can skip bounding the command.
+timeout_cmd() {
+    local candidate
+    for candidate in timeout gtimeout; do
+        if command -v "$candidate" >/dev/null 2>&1; then
+            echo "$candidate"
+            return
+        fi
+    done
+    echo ""
+}
+
+# Helper: Check if node is running
+# Queries the manager's JSON status and returns 0 iff it reports a running
+# node. The query is bounded to 2 seconds when timeout/gtimeout exists so a
+# hung RPC cannot stall the installer. Prefers jq; falls back to a grep
+# match on the raw JSON text when jq is absent.
+node_running() {
+ local TO; TO=$(timeout_cmd)
+ local status_json
+ if [[ -n "$TO" ]]; then
+ status_json=$($TO 2 "$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
+ else
+ status_json=$("$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
+ fi
+
+ if command -v jq >/dev/null 2>&1; then
+ # jq -e exits nonzero when the result is false/null, so the boolean maps
+ # directly onto this function's return code.
+ echo "$status_json" | jq -er '.node.running // .running // false' >/dev/null 2>&1 && return 0 || return 1
+ else
+ echo "$status_json" | grep -q '"running"[[:space:]]*:[[:space:]]*true' && return 0 || return 1
+ fi
+}
+
+# Helper: Check if current node consensus key already exists in validator set
+node_is_validator() {
+ local result
+ if ! result=$("$MANAGER_BIN" register-validator --check-only --output json 2>/dev/null); then
+ return 1
+ fi
+ if command -v jq >/dev/null 2>&1; then
+ local flag
+ flag=$(echo "$result" | jq -r '.registered // false' 2>/dev/null || echo "false")
+ [[ "$flag" == "true" ]] && return 0 || return 1
+ else
+ echo "$result" | grep -q '"registered"[[:space:]]*:[[:space:]]*true' && return 0 || return 1
+ fi
+}
+
+# Helper: Print useful commands
+# Emits a short post-install cheat sheet of manager subcommands to stdout,
+# framed by blank lines so it stands out in the installer transcript.
+print_useful_cmds() {
+ echo
+ echo "Useful commands:"
+ echo " push-validator status # Check node status"
+ echo " push-validator logs # View logs"
+ echo " push-validator stop # Stop the node"
+ echo " push-validator restart # Restart the node"
+ echo " push-validator register-validator # Register as validator"
+ echo
+}
+
+# Helper: Prompt for user confirmation
+prompt_yes_no() {
+ local prompt="$1"
+ local default="${2:-n}"
+ local response
+
+ if [[ "$default" == "y" ]]; then
+ echo -n "$prompt [Y/n]: "
+ else
+ echo -n "$prompt [y/N]: "
+ fi
+
+ read -r response
+ response=${response:-$default}
+
+ case "$response" in
+ [yY][eE][sS]|[yY]) return 0 ;;
+ *) return 1 ;;
+ esac
+}
+
+# Helper: Update shell profile with Go PATH
+# $1 = Go install root (e.g. /usr/local/go). Appends an `export PATH=...`
+# line for "$1/bin" to the first existing profile file for the user's shell
+# (skipping the append if the path is already mentioned), and always exports
+# the path into the current session.
+update_shell_profile() {
+ local go_install_dir="$1"
+ local profile_updated=0
+
+ # Detect shell and profile files
+ local shell_name="${SHELL##*/}"
+ local profile_files=()
+
+ case "$shell_name" in
+ bash)
+ profile_files=("$HOME/.bashrc" "$HOME/.bash_profile" "$HOME/.profile")
+ ;;
+ zsh)
+ profile_files=("$HOME/.zshrc" "$HOME/.zprofile")
+ ;;
+ *)
+ profile_files=("$HOME/.profile" "$HOME/.bashrc")
+ ;;
+ esac
+
+ local go_path_line="export PATH=\"$go_install_dir/bin:\$PATH\""
+
+ # Update only the FIRST existing profile file (break after one hit).
+ for profile in "${profile_files[@]}"; do
+ if [[ -f "$profile" ]]; then
+ # Check if Go path already exists
+ if ! grep -q "$go_install_dir/bin" "$profile" 2>/dev/null; then
+ echo "" >> "$profile"
+ echo "# Added by Push Chain installer" >> "$profile"
+ echo "$go_path_line" >> "$profile"
+ profile_updated=1
+ verbose "Updated $profile with Go PATH"
+ break
+ else
+ verbose "Go PATH already in $profile"
+ profile_updated=1 # Mark as updated since PATH already exists
+ break
+ fi
+ fi
+ done
+
+ # No candidate profile file existed: append to the shell's preferred one
+ # (>> creates the file if missing).
+ if [[ $profile_updated -eq 0 ]] && [[ ${#profile_files[@]} -gt 0 ]]; then
+ local default_profile="${profile_files[0]}"
+ echo "" >> "$default_profile"
+ echo "# Added by Push Chain installer" >> "$default_profile"
+ echo "$go_path_line" >> "$default_profile"
+ verbose "Created $default_profile with Go PATH"
+ fi
+
+ # Export for current session
+ export PATH="$go_install_dir/bin:$PATH"
+}
+
+# Helper: Install Go automatically
+install_go() {
+ local go_version="1.23.3"
+ local arch
+ local os="linux"
+ local download_url
+ local install_dir
+ local use_sudo=0
+ local temp_dir
+
+ # Detect architecture
+ case "$(uname -m)" in
+ x86_64|amd64)
+ arch="amd64"
+ ;;
+ aarch64|arm64)
+ arch="arm64"
+ ;;
+ armv7l)
+ arch="armv6l"
+ ;;
+ *)
+ err "Unsupported architecture: $(uname -m)"
+ return 1
+ ;;
+ esac
+
+ # Detect OS (already set to linux, but keeping for future macOS support)
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ os="darwin"
+ fi
+
+ download_url="https://go.dev/dl/go${go_version}.${os}-${arch}.tar.gz"
+
+ # Determine installation directory and sudo requirement
+ if [[ -w "/usr/local" ]]; then
+ install_dir="/usr/local"
+ use_sudo=0
+ elif command -v sudo >/dev/null 2>&1 && sudo -n true 2>/dev/null; then
+ install_dir="/usr/local"
+ use_sudo=1
+ else
+ # Install to user's home directory
+ install_dir="$HOME/.local"
+ use_sudo=0
+ mkdir -p "$install_dir"
+ fi
+
+ phase "Installing Go ${go_version}"
+
+ # Check if Go already exists at target location
+ if [[ -d "$install_dir/go" ]]; then
+ step "Backing up existing Go installation"
+ if [[ $use_sudo -eq 1 ]]; then
+ sudo mv "$install_dir/go" "$install_dir/go.backup.$(date +%s)" || true
+ else
+ mv "$install_dir/go" "$install_dir/go.backup.$(date +%s)" || true
+ fi
+ fi
+
+ # Create temp directory for download
+ temp_dir=$(mktemp -d)
+ trap "rm -rf '$temp_dir'" EXIT
+
+ step "Downloading Go ${go_version} for ${os}/${arch}"
+ if command -v curl >/dev/null 2>&1; then
+ curl -L --progress-bar -o "$temp_dir/go.tar.gz" "$download_url" || {
+ err "Failed to download Go"
+ return 1
+ }
+ elif command -v wget >/dev/null 2>&1; then
+ wget --show-progress -O "$temp_dir/go.tar.gz" "$download_url" || {
+ err "Failed to download Go"
+ return 1
+ }
+ else
+ err "Neither curl nor wget found. Cannot download Go."
+ return 1
+ fi
+
+ step "Extracting Go to $install_dir"
+ if [[ $use_sudo -eq 1 ]]; then
+ sudo tar -C "$install_dir" -xzf "$temp_dir/go.tar.gz" || {
+ err "Failed to extract Go"
+ return 1
+ }
+ else
+ tar -C "$install_dir" -xzf "$temp_dir/go.tar.gz" || {
+ err "Failed to extract Go"
+ return 1
+ }
+ fi
+
+ # Update PATH for current session and shell profile
+ step "Updating PATH environment"
+ update_shell_profile "$install_dir/go"
+
+ # Verify installation
+ if "$install_dir/go/bin/go" version >/dev/null 2>&1; then
+ local installed_version
+ installed_version=$("$install_dir/go/bin/go" version | awk '{print $3}')
+ ok "Go installed successfully: $installed_version"
+ echo
+ echo -e "${GREEN}Go has been installed to: $install_dir/go${NC}"
+ echo -e "${YELLOW}Note: You may need to restart your shell or run: source ~/.bashrc${NC}"
+ echo
+ trap - EXIT # Clear the EXIT trap before returning
+ return 0
+ else
+ err "Go installation verification failed"
+ trap - EXIT # Clear the EXIT trap before returning
+ return 1
+ fi
+}
+
+# clean_data_and_preserve_keys: wipe chain state while keeping credentials.
+# $1 = mode: "initial" (full reinstall: remove binaries, configs, and data)
+#            or "retry" (keep configs; wipe only chain data to recover from
+#            a stuck/corrupted state sync).
+# $2 = suffix used to make the fallback backup paths unique (default "1").
+# Wallet keyrings and the validator/node key files are copied to temp dirs
+# before deletion and restored afterwards. Relies on globals HOME_DIR,
+# ROOT_DIR, MANAGER_BIN, INSTALL_BIN_DIR.
+clean_data_and_preserve_keys() {
+ local mode="$1"
+ local suffix="${2:-1}"
+
+ local wallet_backup
+ local validator_backup
+
+ # mktemp may be unavailable; fall back to PID+suffix-scoped /tmp paths.
+ wallet_backup=$(mktemp -d 2>/dev/null || echo "/tmp/pchain-wallet-backup-$$-$suffix")
+ validator_backup=$(mktemp -d 2>/dev/null || echo "/tmp/pchain-validator-backup-$$-$suffix")
+
+ if [[ "$mode" == "initial" ]]; then
+ if [[ -d "$HOME_DIR" ]]; then
+ step "Backing up wallet keys (your account credentials)"
+ mkdir -p "$wallet_backup"
+ local backed_wallet=0
+ for keyring_dir in "$HOME_DIR"/keyring-*; do
+ if [[ -d "$keyring_dir" ]]; then
+ cp -r "$keyring_dir" "$wallet_backup/" 2>/dev/null || true
+ backed_wallet=1
+ fi
+ done
+ if [[ $backed_wallet -eq 1 ]]; then
+ ok "Wallets backed up"
+ fi
+ fi
+
+ if [[ -d "$HOME_DIR/config" ]]; then
+ step "Backing up validator keys"
+ mkdir -p "$validator_backup"
+ cp "$HOME_DIR/config/priv_validator_key.json" "$validator_backup/" 2>/dev/null || true
+ cp "$HOME_DIR/config/node_key.json" "$validator_backup/" 2>/dev/null || true
+ if [[ -n "$(ls -A "$validator_backup" 2>/dev/null)" ]]; then
+ ok "Validator keys backed up"
+ fi
+ fi
+ else
+ step "Backing up wallet and validator keys"
+ mkdir -p "$wallet_backup" "$validator_backup"
+ for keyring_dir in "$HOME_DIR"/keyring-*; do
+ if [[ -d "$keyring_dir" ]]; then
+ cp -r "$keyring_dir" "$wallet_backup/" 2>/dev/null || true
+ fi
+ done
+ cp "$HOME_DIR/config/priv_validator_key.json" "$validator_backup/" 2>/dev/null || true
+ cp "$HOME_DIR/config/node_key.json" "$validator_backup/" 2>/dev/null || true
+ ok "Keys backed up to temporary location"
+ fi
+
+ if [[ "$mode" == "initial" ]]; then
+ step "Removing old installation"
+ rm -rf "$ROOT_DIR" 2>/dev/null || true
+ rm -rf "$HOME_DIR/data" 2>/dev/null || true
+ rm -f "$HOME_DIR/pchaind.pid" 2>/dev/null || true
+ rm -f "$MANAGER_BIN" 2>/dev/null || true
+ rm -f "$INSTALL_BIN_DIR/pchaind" 2>/dev/null || true
+ rm -f "$HOME_DIR/.initial_state_sync" 2>/dev/null || true
+
+ rm -f "$HOME_DIR/config/config.toml" 2>/dev/null || true
+ rm -f "$HOME_DIR/config/app.toml" 2>/dev/null || true
+ rm -f "$HOME_DIR/config/addrbook.json" 2>/dev/null || true
+ rm -f "$HOME_DIR/config/genesis.json" 2>/dev/null || true
+ rm -f "$HOME_DIR/config/config.toml."*.bak 2>/dev/null || true
+ else
+ step "Cleaning all chain data (fixing potential corruption)"
+ rm -rf "$HOME_DIR/data" 2>/dev/null || true
+ rm -f "$HOME_DIR/.initial_state_sync" 2>/dev/null || true
+ mkdir -p "$HOME_DIR/data"
+ # Fresh consensus state so the node does not think it double-signed.
+ echo '{"height":"0","round":0,"step":0}' > "$HOME_DIR/data/priv_validator_state.json"
+ fi
+
+ if [[ "$mode" == "initial" ]]; then
+ if [[ -d "$wallet_backup" && -n "$(ls -A "$wallet_backup" 2>/dev/null)" ]]; then
+ step "Restoring wallets"
+ mkdir -p "$HOME_DIR"
+ cp -r "$wallet_backup"/. "$HOME_DIR/" 2>/dev/null || true
+ ok "Wallets restored"
+ fi
+ if [[ -d "$validator_backup" && -n "$(ls -A "$validator_backup" 2>/dev/null)" ]]; then
+ step "Restoring validator keys"
+ mkdir -p "$HOME_DIR/config"
+ cp -r "$validator_backup"/. "$HOME_DIR/config/" 2>/dev/null || true
+ ok "Validator keys restored"
+ fi
+ else
+ step "Restoring wallet and validator keys"
+ mkdir -p "$HOME_DIR" "$HOME_DIR/config"
+ cp -r "$wallet_backup"/. "$HOME_DIR/" 2>/dev/null || true
+ cp "$validator_backup"/priv_validator_key.json "$HOME_DIR/config/" 2>/dev/null || true
+ cp "$validator_backup"/node_key.json "$HOME_DIR/config/" 2>/dev/null || true
+ # Marker file read elsewhere to signal a state-sync restart happened.
+ echo "$(date -u +%Y-%m-%dT%H:%M:%SZ)" > "$HOME_DIR/.initial_state_sync"
+ ok "Keys restored successfully"
+ fi
+
+ rm -rf "$wallet_backup" "$validator_backup" 2>/dev/null || true
+
+ if [[ "$mode" == "retry" ]]; then
+ ok "Data cleaned, ready for fresh sync"
+ fi
+}
+
+# Phase tracking with timing
+INSTALL_START_TIME=$(date +%s)
+PHASE_NUM=0
+TOTAL_PHASES=6 # Will be adjusted based on what's needed
+PHASE_START_TIME=0
+# next_phase: advance the phase counter, restart the per-phase timer that
+# ok() reads, and print the "[n/total] title" banner via phase().
+next_phase() {
+ ((++PHASE_NUM)) # Use pre-increment to avoid returning 0 with set -e
+ PHASE_START_TIME=$(date +%s)
+ phase "[$PHASE_NUM/$TOTAL_PHASES] $1"
+}
+
+# Script location (works when piped or invoked directly)
+if [ -n "${BASH_SOURCE+x}" ]; then SCRIPT_SOURCE="${BASH_SOURCE[0]}"; else SCRIPT_SOURCE="$0"; fi
+SELF_DIR="$(cd -- "$(dirname -- "$SCRIPT_SOURCE")" >/dev/null 2>&1 && pwd -P || pwd)"
+
+# Defaults (overridable via env)
+MONIKER="${MONIKER:-push-validator}"
+GENESIS_DOMAIN="${GENESIS_DOMAIN:-rpc-testnet-donut-node1.push.org}"
+KEYRING_BACKEND="${KEYRING_BACKEND:-test}"
+CHAIN_ID="${CHAIN_ID:-push_42101-1}"
+SNAPSHOT_RPC="${SNAPSHOT_RPC:-https://rpc-testnet-donut-node2.push.org}"
+RESET_DATA="${RESET_DATA:-yes}"
+AUTO_START="${AUTO_START:-yes}"
+PNM_REF="${PNM_REF:-feature/pnm}"
+BIN_DIR="${BIN_DIR:-$HOME/.local/bin}"
+PREFIX="${PREFIX:-}"
+
+# Flags (CLI flags below override both defaults and environment values)
+USE_LOCAL="no"
+LOCAL_REPO=""
+PCHAIND="${PCHAIND:-}"
+PCHAIND_REF="${PCHAIND_REF:-}"
+PCHAIND_REF="${PCHAIND_REF:-}"
+# Parse CLI flags; any unknown flag aborts with usage hint (exit 2).
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --no-start) AUTO_START="no"; shift ;;
+ --start) AUTO_START="yes"; shift ;;
+ --no-reset) RESET_DATA="no"; shift ;;
+ --reset) RESET_DATA="yes"; shift ;;
+ --verbose) VERBOSE="yes"; shift ;;
+ --no-color) NO_COLOR="1"; shift ;;
+ --bin-dir) BIN_DIR="$2"; shift 2 ;;
+ --prefix) PREFIX="$2"; shift 2 ;;
+ --moniker) MONIKER="$2"; shift 2 ;;
+ --genesis) GENESIS_DOMAIN="$2"; shift 2 ;;
+ --keyring) KEYRING_BACKEND="$2"; shift 2 ;;
+ --chain-id) CHAIN_ID="$2"; shift 2 ;;
+ --snapshot-rpc) SNAPSHOT_RPC="$2"; shift 2 ;;
+ --pchaind-ref) PCHAIND_REF="$2"; shift 2 ;;
+ --use-local) USE_LOCAL="yes"; shift ;;
+ --local-repo) LOCAL_REPO="$2"; shift 2 ;;
+ --help)
+ echo "Push Validator Manager (Go) - Installer"
+ echo
+ echo "Usage: bash install.sh [OPTIONS]"
+ echo
+ echo "Installation Options:"
+ echo " --use-local Use current repository checkout to build"
+ echo " --local-repo DIR Use specific local repository directory"
+ echo " --bin-dir DIR Install binaries to DIR (default: ~/.local/bin)"
+ echo " --prefix DIR Use DIR as installation prefix (sets data dir)"
+ echo
+ echo "Node Configuration:"
+ echo " --moniker NAME Set validator moniker (default: push-validator)"
+ echo " --chain-id ID Set chain ID (default: push_42101-1)"
+ echo " --genesis DOMAIN Genesis domain (default: rpc-testnet-donut-node1.push.org)"
+ echo " --snapshot-rpc URL Snapshot RPC URL (default: https://rpc-testnet-donut-node2.push.org)"
+ echo " --keyring BACKEND Keyring backend (default: test)"
+ echo
+ echo "Build Options:"
+ echo " --pchaind-ref REF Build pchaind from specific git ref/branch/tag"
+ echo
+ echo "Behavior Options:"
+ echo " --reset Reset all data (default)"
+ echo " --no-reset Keep existing data"
+ echo " --start Start node after installation (default)"
+ echo " --no-start Install only, don't start"
+ echo
+ echo "Output Options:"
+ echo " --verbose Show verbose output"
+ echo " --no-color Disable colored output"
+ echo
+ echo "Environment Variables:"
+ echo " NO_COLOR Set to disable colors"
+ echo " VERBOSE Set to 'yes' for verbose output"
+ echo " PNM_REF Git ref for push-validator-manager (default: feature/pnm)"
+ echo " PCHAIND_REF Git ref for pchaind binary"
+ echo
+ echo "Examples:"
+ echo " bash install.sh --use-local --verbose"
+ echo " bash install.sh --no-reset --no-start"
+ echo " bash install.sh --bin-dir /usr/local/bin --prefix /opt/pchain"
+ echo " PNM_REF=main bash install.sh"
+ exit 0
+ ;;
+ *) err "Unknown flag: $1 (use --help for usage)"; exit 2 ;;
+ esac
+done
+
+# Paths
+# With --prefix everything lives under one relocatable root; otherwise use
+# XDG data dir for sources, BIN_DIR for binaries, and ~/.pchain for state.
+if [[ -n "$PREFIX" ]]; then
+ ROOT_DIR="$PREFIX/share/push-validator"
+ INSTALL_BIN_DIR="$PREFIX/bin" # --prefix overrides BIN_DIR for relocatable installs
+ HOME_DIR="${HOME_DIR:-$PREFIX/data}"
+else
+ if [[ -n "${XDG_DATA_HOME:-}" ]]; then ROOT_DIR="$XDG_DATA_HOME/push-validator"; else ROOT_DIR="$HOME/.local/share/push-validator"; fi
+ INSTALL_BIN_DIR="$BIN_DIR"
+ HOME_DIR="${HOME_DIR:-$HOME/.pchain}"
+fi
+REPO_DIR="$ROOT_DIR/repo"
+MANAGER_BIN="$INSTALL_BIN_DIR/push-validator"
+
+# Detect what phases are needed BEFORE creating directories
+HAS_RUNNING_NODE="no"
+HAS_EXISTING_INSTALL="no"
+
+# Check if node is running or processes exist
+if [[ -x "$MANAGER_BIN" ]] && command -v "$MANAGER_BIN" >/dev/null 2>&1; then
+ # Manager exists, check if node is actually running via status
+ STATUS_JSON=$("$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
+ if echo "$STATUS_JSON" | grep -q '"running"[[:space:]]*:[[:space:]]*true'; then
+ HAS_RUNNING_NODE="yes"
+ fi
+elif pgrep -x pchaind >/dev/null 2>&1 || pgrep -x push-validator >/dev/null 2>&1; then
+ HAS_RUNNING_NODE="yes"
+fi
+
+# Check if installation exists (check for actual installation artifacts, not just config)
+if [[ -d "$ROOT_DIR" ]] || [[ -x "$MANAGER_BIN" ]]; then
+ HAS_EXISTING_INSTALL="yes"
+elif [[ -d "$HOME_DIR/data" ]] && [[ -n "$(ls -A "$HOME_DIR/data" 2>/dev/null)" ]]; then
+ # Only count as existing if data directory has content
+ HAS_EXISTING_INSTALL="yes"
+fi
+
+mkdir -p "$ROOT_DIR" "$INSTALL_BIN_DIR"
+
+verbose "Installation paths:"
+verbose " Root dir: $ROOT_DIR"
+verbose " Bin dir: $INSTALL_BIN_DIR"
+verbose " Home dir: $HOME_DIR"
+
+# Check git (simple, always needed)
+# Hard requirement: git is needed both to clone the repo and to stamp the
+# build with a commit hash. Print per-platform install hints and exit.
+if ! command -v git >/dev/null 2>&1; then
+ err "Missing dependency: git"
+ echo
+ echo "Git is required to clone the repository."
+ echo
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ echo "Install with: brew install git"
+ echo "Or download from: https://git-scm.com/downloads"
+ elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
+ if command -v apt-get >/dev/null 2>&1; then
+ echo "Install with: sudo apt-get install git"
+ elif command -v yum >/dev/null 2>&1; then
+ echo "Install with: sudo yum install git"
+ else
+ echo "Install using your package manager or download from: https://git-scm.com/downloads"
+ fi
+ else
+ echo "Download from: https://git-scm.com/downloads"
+ fi
+ exit 1
+fi
+
+# Check Go with automatic installation option
+# Go 1.23+ is required to build pchaind. Detect missing/old toolchains,
+# then either auto-install (non-interactive/CI) or prompt the user.
+GO_NEEDS_INSTALL=0
+GO_NEEDS_UPGRADE=0
+
+if ! command -v go >/dev/null 2>&1; then
+ GO_NEEDS_INSTALL=1
+ warn "Go is not installed"
+else
+ # Validate Go version (requires 1.23+ for pchaind build)
+ GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//')
+ GO_MAJOR=$(echo "$GO_VERSION" | cut -d. -f1)
+ GO_MINOR=$(echo "$GO_VERSION" | cut -d. -f2)
+
+ if [[ "$GO_MAJOR" -lt 1 ]] || [[ "$GO_MAJOR" -eq 1 && "$GO_MINOR" -lt 23 ]]; then
+ GO_NEEDS_UPGRADE=1
+ warn "Go version too old: $GO_VERSION (need 1.23+)"
+ else
+ verbose "Go version check passed: $GO_VERSION"
+ fi
+fi
+
+# Handle Go installation/upgrade if needed
+if [[ $GO_NEEDS_INSTALL -eq 1 ]] || [[ $GO_NEEDS_UPGRADE -eq 1 ]]; then
+ echo
+ if [[ $GO_NEEDS_INSTALL -eq 1 ]]; then
+ echo -e "${BOLD}Go 1.23 or higher is required to build the Push Chain binary.${NC}"
+ else
+ echo -e "${BOLD}Your Go version is too old. Go 1.23+ is required.${NC}"
+ fi
+ echo
+
+ # Check if we're in non-interactive mode (CI/automation)
+ if [[ ! -t 0 ]] || [[ "${CI:-false}" == "true" ]] || [[ "${NON_INTERACTIVE:-false}" == "true" ]]; then
+ echo "Running in non-interactive mode. Attempting automatic Go installation..."
+ if install_go; then
+ # Re-check Go after installation
+ if ! command -v go >/dev/null 2>&1; then
+ # Try with the newly installed path
+ if [[ -f "$HOME/.local/go/bin/go" ]]; then
+ export PATH="$HOME/.local/go/bin:$PATH"
+ elif [[ -f "/usr/local/go/bin/go" ]]; then
+ export PATH="/usr/local/go/bin:$PATH"
+ fi
+ fi
+
+ # Verify installation worked
+ if command -v go >/dev/null 2>&1; then
+ GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//')
+ verbose "Go successfully installed: $GO_VERSION"
+ else
+ err "Go installation completed but 'go' command not found in PATH"
+ echo "Please add Go to your PATH and run the installer again."
+ exit 1
+ fi
+ else
+ err "Automatic Go installation failed"
+ echo
+ echo "Please install Go manually:"
+ echo " โข Download from: https://go.dev/dl/"
+ echo " โข Or use your package manager (ensure version 1.23+)"
+ exit 1
+ fi
+ else
+ # Interactive mode - prompt user
+ # NOTE(review): this branch duplicates the recovery logic above; keep the
+ # two paths in sync when editing either one.
+ if prompt_yes_no "Would you like to install Go 1.23.3 automatically?" "y"; then
+ if install_go; then
+ # Re-check Go after installation
+ if ! command -v go >/dev/null 2>&1; then
+ # Try with the newly installed path
+ if [[ -f "$HOME/.local/go/bin/go" ]]; then
+ export PATH="$HOME/.local/go/bin:$PATH"
+ elif [[ -f "/usr/local/go/bin/go" ]]; then
+ export PATH="/usr/local/go/bin:$PATH"
+ fi
+ fi
+
+ # Verify installation worked
+ if command -v go >/dev/null 2>&1; then
+ GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//')
+ verbose "Go successfully installed: $GO_VERSION"
+ else
+ err "Go installation completed but 'go' command not found in PATH"
+ echo "Please add Go to your PATH and run the installer again."
+ exit 1
+ fi
+ else
+ err "Go installation failed"
+ echo
+ echo "Please install Go manually and run this installer again."
+ echo "Download from: https://go.dev/dl/"
+ exit 1
+ fi
+ else
+ echo
+ echo "Manual installation required. Please install Go 1.23+ from:"
+ echo " โข Download: https://go.dev/dl/"
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ echo " โข Using Homebrew: brew install go"
+ elif command -v apt-get >/dev/null 2>&1; then
+ echo " โข Using apt: sudo apt-get install golang-go (check version)"
+ elif command -v yum >/dev/null 2>&1; then
+ echo " โข Using yum: sudo yum install golang (check version)"
+ fi
+ echo
+ echo "After installing Go, run this installer again."
+ exit 1
+ fi
+ fi
+fi
+
+# Ensure Go is accessible after installation (refresh command cache)
+if ! command -v go >/dev/null 2>&1; then
+ if [[ -f "/usr/local/go/bin/go" ]]; then
+ export PATH="/usr/local/go/bin:$PATH"
+ elif [[ -f "$HOME/.local/go/bin/go" ]]; then
+ export PATH="$HOME/.local/go/bin:$PATH"
+ fi
+ hash -r 2>/dev/null || true # Refresh bash command cache
+fi
+
+# Store environment info (will print after manager is built)
+OS_NAME=$(uname -s | tr '[:upper:]' '[:lower:]')
+OS_ARCH=$(uname -m)
+GO_VER=$(go version | awk '{print $3}' | sed 's/go//')
+TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
+
+# Calculate total phases needed (detection already done above before mkdir)
+TOTAL_PHASES=4 # Base: Install Manager, Build Chain, Init, Start
+if [[ "$HAS_RUNNING_NODE" = "yes" ]]; then
+ ((TOTAL_PHASES++)) # Add stopping phase
+fi
+if [[ "$RESET_DATA" = "yes" ]] && [[ "$HAS_EXISTING_INSTALL" = "yes" ]]; then
+ ((TOTAL_PHASES++)) # Add cleaning phase
+fi
+
+verbose "Phases needed: $TOTAL_PHASES (running=$HAS_RUNNING_NODE, existing=$HAS_EXISTING_INSTALL)"
+
+# Print installation banner
+echo
+echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
+echo "โ PUSH VALIDATOR MANAGER INSTALLATION โ"
+echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
+echo
+
+# Stop any running processes first (only if needed)
+# Graceful order: manager stop -> SIGTERM/SIGKILL the reported PID ->
+# pkill by exact process name as a last resort.
+if [[ "$HAS_RUNNING_NODE" = "yes" ]]; then
+ next_phase "Stopping Validator Processes"
+if [[ -x "$MANAGER_BIN" ]]; then
+ step "Stopping manager gracefully"
+ "$MANAGER_BIN" stop >/dev/null 2>&1 || true
+ sleep 2
+fi
+# Kill any remaining pchaind processes
+step "Cleaning up remaining processes"
+
+# Try graceful PID-based approach first
+if [[ -x "$MANAGER_BIN" ]]; then
+ TO_CMD=$(timeout_cmd)
+ if [[ -n "$TO_CMD" ]]; then
+ STATUS_JSON=$($TO_CMD 5 "$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
+ else
+ STATUS_JSON=$("$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
+ fi
+ if command -v jq >/dev/null 2>&1; then
+ PID=$(echo "$STATUS_JSON" | jq -r '.node.pid // .pid // empty' 2>/dev/null)
+ else
+ PID=$(echo "$STATUS_JSON" | grep -o '"pid"[[:space:]]*:[[:space:]]*[0-9]*' | grep -o '[0-9]*$')
+ fi
+ if [[ -n "$PID" && "$PID" =~ ^[0-9]+$ ]]; then
+ kill -TERM "$PID" 2>/dev/null || true
+ sleep 1
+ kill -KILL "$PID" 2>/dev/null || true
+ fi
+fi
+
+# Fallback: use pkill with exact name matching (POSIX-portable)
+pkill -x pchaind 2>/dev/null || true
+pkill -x push-validator 2>/dev/null || true
+sleep 1
+ok "Stopped all validator processes"
+else
+ verbose "No running processes to stop (skipped)"
+fi
+
+# Clean install: remove all previous installation artifacts (preserve wallets and validator keys)
+NEED_INIT="no" # Track if we need to force init
+if [[ "$RESET_DATA" = "yes" ]] && [[ "$HAS_EXISTING_INSTALL" = "yes" ]]; then
+ next_phase "Cleaning Installation"
+ clean_data_and_preserve_keys "initial" "init"
+ NEED_INIT="yes" # Force init after full reset
+ ok "Clean installation ready"
+elif [[ "$RESET_DATA" = "yes" ]]; then
+ verbose "Fresh installation detected (skipped cleanup)"
+ NEED_INIT="yes" # Force init for fresh installations too
+else
+ verbose "Skipping data reset (--no-reset)"
+fi
+
+next_phase "Installing Validator Manager"
+verbose "Target directory: $ROOT_DIR"
+
+# Determine repo source: an existing local checkout (--use-local/--local-repo)
+# or a fresh shallow clone of the requested ref.
+if [[ "$USE_LOCAL" = "yes" || -n "$LOCAL_REPO" ]]; then
+ if [[ -n "$LOCAL_REPO" ]]; then REPO_DIR="$(cd "$LOCAL_REPO" && pwd -P)"; else REPO_DIR="$(cd "$SELF_DIR/.." && pwd -P)"; fi
+ step "Using local repository: $REPO_DIR"
+ if [[ ! -f "$REPO_DIR/push-validator-manager/go.mod" ]]; then
+ err "Expected Go module not found at: $REPO_DIR/push-validator-manager"; exit 1
+ fi
+else
+ rm -rf "$REPO_DIR"
+ step "Cloning push-chain-node (ref: $PNM_REF)"
+ git clone --quiet --depth 1 --branch "$PNM_REF" https://github.com/pushchain/push-chain-node "$REPO_DIR"
+fi
+
+# Build manager from source (ensures latest + no external runtime deps)
+if [[ ! -d "$REPO_DIR/push-validator-manager" ]]; then
+ err "Expected directory missing: $REPO_DIR/push-validator-manager"
+ warn "The cloned ref ('$PNM_REF') may not include the Go module yet."
+ # Suggest local usage if available
+ LOCAL_CANDIDATE="$(cd "$SELF_DIR/.." 2>/dev/null && pwd -P || true)"
+ if [[ -n "$LOCAL_CANDIDATE" && -d "$LOCAL_CANDIDATE/push-validator-manager" ]]; then
+ warn "Try: bash push-validator-manager/install.sh --use-local"
+ fi
+ warn "Or specify a branch/tag that contains it: PNM_REF=feature/pnm bash push-validator-manager/install.sh"
+ exit 1
+fi
+
+# Check if already up-to-date (idempotent install): skip the build when the
+# repo HEAD commit matches the commit baked into the installed binary.
+SKIP_BUILD=no
+if [[ -x "$MANAGER_BIN" ]]; then
+    CURRENT_COMMIT=$(cd "$REPO_DIR/push-validator-manager" && git rev-parse --short HEAD 2>/dev/null || echo "unknown")
+    # Extract commit from version output (format: "push-validator vX.Y.Z (1f599bd) built ...")
+    INSTALLED_COMMIT=$("$MANAGER_BIN" version 2>/dev/null | sed -n 's/.*(\([0-9a-f]\{7,\}\)).*/\1/p')
+    # Only skip build if both are valid hex commits and match
+    if [[ "$CURRENT_COMMIT" =~ ^[0-9a-f]+$ ]] && [[ "$INSTALLED_COMMIT" =~ ^[0-9a-f]+$ ]] && [[ "$CURRENT_COMMIT" == "$INSTALLED_COMMIT" ]]; then
+        step "Manager already up-to-date ($CURRENT_COMMIT) - skipped"
+        SKIP_BUILD=yes
+    fi
+fi
+
+if [[ "$SKIP_BUILD" = "no" ]]; then
+    step "Building Push Validator Manager binary"
+    pushd "$REPO_DIR/push-validator-manager" >/dev/null
+
+    # Build version information embedded into the binary via -ldflags
+    VERSION=${VERSION:-$(git describe --tags --always --dirty 2>/dev/null || echo "v1.0.0")}
+    COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
+    BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
+    LDFLAGS="-X main.Version=$VERSION -X main.Commit=$COMMIT -X main.BuildDate=$BUILD_DATE"
+
+    GOFLAGS="-trimpath" CGO_ENABLED=0 go build -mod=mod -ldflags="$LDFLAGS" -o "$MANAGER_BIN" ./cmd/push-validator
+    popd >/dev/null
+    chmod +x "$MANAGER_BIN"
+
+    # Compute and display SHA256 of the installed binary.
+    # Fix: initialize MANAGER_SHA first — under `set -u`, referencing it while
+    # unset (no sha256sum AND no shasum on the host) aborted the installer.
+    MANAGER_SHA=""
+    if command -v sha256sum >/dev/null 2>&1; then
+        MANAGER_SHA=$(sha256sum "$MANAGER_BIN" 2>/dev/null | awk '{print $1}')
+    elif command -v shasum >/dev/null 2>&1; then
+        MANAGER_SHA=$(shasum -a 256 "$MANAGER_BIN" 2>/dev/null | awk '{print $1}')
+    fi
+    if [[ -n "$MANAGER_SHA" ]]; then
+        SHA_SHORT="${MANAGER_SHA:0:8}...${MANAGER_SHA: -8}"
+        ok "Built push-validator (SHA256: $SHA_SHORT)"
+    else
+        ok "Built push-validator"
+    fi
+fi
+
+ok "Manager installed: $MANAGER_BIN"
+
+# Print environment banner now that manager is built
+MANAGER_VER_BANNER="dev unknown"
+if [[ -x "$MANAGER_BIN" ]]; then
+ # Parse full version output: "push-validator v1.0.0 (abc1234) built 2025-01-08"
+ MANAGER_FULL=$("$MANAGER_BIN" version 2>/dev/null || echo "unknown")
+ if [[ "$MANAGER_FULL" != "unknown" ]]; then
+ MANAGER_VER_BANNER=$(echo "$MANAGER_FULL" | awk '{print $2, $3}' | sed 's/[()]//g')
+ fi
+fi
+echo
+
+# Ensure PATH for current session
+case ":$PATH:" in *":$INSTALL_BIN_DIR:"*) : ;; *) export PATH="$INSTALL_BIN_DIR:$PATH" ;; esac
+
+# Persist PATH in common shell config files (idempotent - won't add duplicates)
+SHELL_CONFIG=""
+if [[ -f "$HOME/.zshrc" ]]; then SHELL_CONFIG="$HOME/.zshrc"; elif [[ -f "$HOME/.bashrc" ]]; then SHELL_CONFIG="$HOME/.bashrc"; elif [[ -f "$HOME/.bash_profile" ]]; then SHELL_CONFIG="$HOME/.bash_profile"; fi
+if [[ -n "$SHELL_CONFIG" ]]; then
+ # Check if PATH already contains this directory in an export statement
+ if ! grep -E "^[[:space:]]*export[[:space:]]+PATH=.*$INSTALL_BIN_DIR" "$SHELL_CONFIG" >/dev/null 2>&1; then
+ echo "" >> "$SHELL_CONFIG"
+ echo "# Push Validator Manager (Go)" >> "$SHELL_CONFIG"
+ echo "export PATH=\"$INSTALL_BIN_DIR:\$PATH\"" >> "$SHELL_CONFIG"
+ fi
+fi
+
+next_phase "Building Chain Binary"
+
+# Build or select pchaind (prefer locally built binary to match network upgrades)
+BUILD_SCRIPT="$REPO_DIR/push-validator-manager/scripts/build-pchaind.sh"
+if [[ -f "$BUILD_SCRIPT" ]]; then
+ step "Building Push Chain binary (Push Node Daemon) from source"
+ # Build from repo (whether local or cloned)
+ BUILD_OUTPUT="$REPO_DIR/push-validator-manager/scripts/build"
+ if bash "$BUILD_SCRIPT" "$REPO_DIR" "$BUILD_OUTPUT"; then
+ if [[ -f "$BUILD_OUTPUT/pchaind" ]]; then
+ mkdir -p "$INSTALL_BIN_DIR"
+ # Symlink (not copy) so rebuilds in BUILD_OUTPUT take effect immediately.
+ ln -sf "$BUILD_OUTPUT/pchaind" "$INSTALL_BIN_DIR/pchaind"
+ export PCHAIND="$INSTALL_BIN_DIR/pchaind"
+
+ # Get binary version
+ BINARY_VERSION=$("$BUILD_OUTPUT/pchaind" version 2>&1 | head -1 || echo "")
+ if [[ -n "$BINARY_VERSION" ]]; then
+ ok "Push Chain binary ready ($BINARY_VERSION)"
+ else
+ ok "Push Chain binary ready"
+ fi
+ else
+ warn "Build completed but binary not found at expected location"
+ fi
+ else
+ warn "Build failed; trying fallback options"
+ fi
+fi
+
+# Final fallback to system pchaind
+if [[ -z "$PCHAIND" || ! -f "$PCHAIND" ]]; then
+ if command -v pchaind >/dev/null 2>&1; then
+ step "Using system Push Node Daemon binary"
+ export PCHAIND="$(command -v pchaind)"
+ ok "Found existing Push Node Daemon: $PCHAIND"
+ else
+ err "Push Node Daemon (pchaind) not found"
+ err "Build failed and no system binary available"
+ err "Please ensure the build script works or install manually"
+ exit 1
+ fi
+fi
+
+verbose "Using built-in WebSocket monitor (no external dependency)"
+
# Main provisioning flow — runs only when the installer may start the node
# immediately (AUTO_START=yes): initialize config, start + state-sync with a
# bounded retry loop, then offer validator registration.
if [[ "$AUTO_START" = "yes" ]]; then
 next_phase "Initializing Node"
 # Initialize if: forced by reset, or config/genesis missing
 if [[ "${NEED_INIT:-no}" = "yes" ]] || [[ ! -f "$HOME_DIR/config/config.toml" ]] || [[ ! -f "$HOME_DIR/config/genesis.json" ]]; then
 step "Configuring node"
 "$MANAGER_BIN" init \
 --moniker "$MONIKER" \
 --home "$HOME_DIR" \
 --chain-id "$CHAIN_ID" \
 --genesis-domain "$GENESIS_DOMAIN" \
 --snapshot-rpc "$SNAPSHOT_RPC" \
 --bin "${PCHAIND:-pchaind}" || { err "init failed"; exit 1; }
 ok "Node initialized"
 else
 step "Configuration exists, skipping init"
 fi

 next_phase "Starting and Syncing Node"
 # Retry budget for the start/sync loop: exit code 42 from `sync` means
 # "stuck" and triggers a data reset (keys preserved) plus a restart.
 MAX_RETRIES=5
 RETRY_COUNT=0
 SYNC_RC=0

 while true; do
 if [[ $RETRY_COUNT -eq 0 ]]; then
 step "Starting Push Chain validator node"
 else
 step "Restarting node (attempt $((RETRY_COUNT + 1))/$((MAX_RETRIES + 1)))"
 fi

 "$MANAGER_BIN" start --no-prompt --home "$HOME_DIR" --bin "${PCHAIND:-pchaind}" 2>&1 | indent_output || { err "start failed"; exit 1; }

 step "Waiting for state sync"
 # Stream compact sync until fully synced (monitor prints snapshot/block progress)
 # Temporarily disable errexit so the non-zero sync exit can be inspected.
 set +e
 "$MANAGER_BIN" sync --compact --window 30 --rpc "http://127.0.0.1:26657" --remote "https://$GENESIS_DOMAIN:443" --skip-final-message
 SYNC_RC=$?
 set -e

 if [[ $SYNC_RC -eq 0 ]]; then
 echo
 sleep 5
 echo -e " ${GREEN}โ Sync complete! Node is fully synced.${NC}"
 break
 fi

 # Exit code 42 is the monitor's "sync stuck" signal: wipe data while
 # preserving validator keys, then retry until the budget runs out.
 if [[ $SYNC_RC -eq 42 ]]; then
 ((RETRY_COUNT++))

 if [[ $RETRY_COUNT -gt $MAX_RETRIES ]]; then
 echo
 err "Sync failed after $MAX_RETRIES retry attempts"
 echo
 echo "The sync process repeatedly got stuck or encountered errors."
 echo
 echo "Common causes:"
 echo " โข Network connectivity issues"
 echo " โข State sync snapshot corruption (app hash mismatch)"
 echo " โข RPC server temporarily unavailable"
 echo " โข Insufficient peers for sync"
 echo
 echo "Troubleshooting steps:"
 echo " 1. Check network: curl https://$GENESIS_DOMAIN/status"
 echo " 2. Verify peers: push-validator status"
 echo " 3. Check logs: tail -100 $HOME_DIR/logs/pchaind.log"
 echo
 echo "If issues persist:"
 echo " โข Discord: https://discord.com/invite/pushchain"
 echo " โข Support: https://push.org/support/"
 echo
 "$MANAGER_BIN" stop >/dev/null 2>&1 || true
 exit 1
 fi

 warn "Sync stuck or failed. Performing full data reset (attempt $RETRY_COUNT/$MAX_RETRIES)..."
 echo

 step "Stopping node"
 "$MANAGER_BIN" stop >/dev/null 2>&1 || true
 sleep 2
 # Belt-and-braces: kill any stragglers before wiping data.
 pkill -x pchaind 2>/dev/null || true
 pkill -x push-validator 2>/dev/null || true
 clean_data_and_preserve_keys "retry" "$RETRY_COUNT"
 echo
 continue
 fi

 # Any other non-zero exit is treated as non-retryable; surface it below.
 warn "Sync monitoring ended with code $SYNC_RC (not a stuck condition, skipping retry)"
 "$MANAGER_BIN" stop >/dev/null 2>&1 || true
 break
 done

 if [[ $SYNC_RC -ne 0 ]]; then
 err "Sync failed with exit code $SYNC_RC"
 exit $SYNC_RC
 fi

 # Detect whether a controlling TTY is available for prompts/log view
 INTERACTIVE="no"
 if [[ -t 0 ]] && [[ -t 1 ]]; then
 INTERACTIVE="yes"
 elif [[ -e /dev/tty ]]; then
 # stdin/stdout are piped (curl | bash) but a terminal is still reachable.
 INTERACTIVE="yes"
 fi

 REGISTRATION_STATUS="skipped"

 ALREADY_VALIDATOR="no"
 if node_is_validator; then
 ALREADY_VALIDATOR="yes"
 REGISTRATION_STATUS="already"
 fi

 # Prompt for validator registration if not already registered
 if [[ "$INTERACTIVE" == "yes" ]] && [[ "$ALREADY_VALIDATOR" == "no" ]]; then
 echo
 echo "Next steps to become a validator:"
 echo "1. Get test tokens from: https://faucet.push.org"
 echo "2. Register as validator with the command below"
 echo
 fi

 # Guard registration prompt in non-interactive mode
 if [[ "$ALREADY_VALIDATOR" == "yes" ]]; then
 RESP="N"
 else
 if [[ "$INTERACTIVE" == "yes" ]]; then
 if [[ -e /dev/tty ]]; then
 # Read from /dev/tty so the prompt works when the script is piped.
 read -r -p "Register as a validator now? (y/N) " RESP < /dev/tty 2> /dev/tty || true
 else
 read -r -p "Register as a validator now? (y/N) " RESP || true
 fi
 else
 RESP="N"
 fi
 fi
 case "${RESP:-}" in
 [Yy])
 echo
 echo "Push Validator Manager - Registration"
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
 # Run registration flow directly (CLI handles prompts and status checks)
 if "$MANAGER_BIN" register-validator; then
 REGISTRATION_STATUS="success"
 else
 REGISTRATION_STATUS="failed"
 fi
 ;;
 *)
 # Ensure clean separation before summary
 echo
 # Only mark as skipped if user declined and was not already a validator
 if [[ "$ALREADY_VALIDATOR" != "yes" ]]; then
 REGISTRATION_STATUS="skipped"
 fi
 ;;
 esac
fi
+
# Calculate total time for summary
INSTALL_END_TIME=$(date +%s)
# If the start timestamp was never recorded, fall back to the end time so
# the subtraction yields 0 instead of a bogus huge number.
TOTAL_TIME=$((INSTALL_END_TIME - ${INSTALL_START_TIME:-$INSTALL_END_TIME}))

# Get node information for unified summary
MANAGER_VER=$("$MANAGER_BIN" version 2>/dev/null | awk '{print $2}' || echo "unknown")
PCHAIND_PATH="${PCHAIND:-pchaind}"
# Extract pchaind version if binary exists
if command -v "$PCHAIND_PATH" >/dev/null 2>&1; then
 CHAIN_VER=$("$PCHAIND_PATH" version 2>/dev/null | head -1 || echo "")
 if [[ -n "$CHAIN_VER" ]]; then
 PCHAIND_VER="$PCHAIND_PATH ($CHAIN_VER)"
 else
 PCHAIND_VER="$PCHAIND_PATH"
 fi
else
 PCHAIND_VER="$PCHAIND_PATH"
fi
RPC_URL="http://127.0.0.1:26657"

# Try to get Node status info
# Bound the status call with `timeout` when available so a hung RPC cannot
# stall the installer summary indefinitely.
TO_CMD=$(timeout_cmd)
if [[ -n "$TO_CMD" ]]; then
 STATUS_JSON=$($TO_CMD 5 "$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
else
 STATUS_JSON=$("$MANAGER_BIN" status --output json 2>/dev/null || echo "{}")
fi
# Prefer jq for JSON parsing; fall back to grep/sed when jq is absent.
# Both paths accept either top-level keys or keys nested under .node.
if command -v jq >/dev/null 2>&1; then
 NETWORK=$(echo "$STATUS_JSON" | jq -r '.network // .node.network // empty' 2>/dev/null)
 MONIKER=$(echo "$STATUS_JSON" | jq -r '.moniker // .node.moniker // empty' 2>/dev/null)
 SYNCED=$(echo "$STATUS_JSON" | jq -r '.synced // .node.synced // empty' 2>/dev/null)
 PEERS=$(echo "$STATUS_JSON" | jq -r '.peers // .node.peers // empty' 2>/dev/null)
else
 NETWORK=$(echo "$STATUS_JSON" | grep -o '"network"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"network"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
 MONIKER=$(echo "$STATUS_JSON" | grep -o '"moniker"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"moniker"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
 SYNCED=$(echo "$STATUS_JSON" | grep -o '"synced"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"synced"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/')
 PEERS=$(echo "$STATUS_JSON" | grep -o '"peers"[[:space:]]*:[[:space:]]*[0-9]*' | sed 's/.*"peers"[[:space:]]*:[[:space:]]*\([0-9]*\).*/\1/')
fi

# Determine node status indicator
# NOTE(review): the icon literals below appear mis-encoded (one string even
# contains an embedded newline, suggesting a corrupted multibyte glyph).
# Confirm the intended emoji and that this file is stored as UTF-8.
NODE_STATUS_ICON="โ๏ธ "
if [[ "$SYNCED" == "true" ]]; then
 NODE_STATUS_ICON="โ
"
fi

VALIDATOR_STATUS_ICON="โ"
if [[ "$ALREADY_VALIDATOR" == "yes" ]]; then
 VALIDATOR_STATUS_ICON="โ
"
fi
+
echo
echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
# Dynamic padding for Installation Complete header
TEXT="INSTALLATION COMPLETE (${TOTAL_TIME}s)"
TEXT_LEN=${#TEXT}
BOX_WIDTH=63
# Center TEXT inside the box; the right pad absorbs any odd remainder.
PADDING_LEFT=$(( (BOX_WIDTH - TEXT_LEN) / 2 ))
PADDING_RIGHT=$(( BOX_WIDTH - TEXT_LEN - PADDING_LEFT ))
printf "โ%*s%s%*sโ\n" $PADDING_LEFT "" "$TEXT" $PADDING_RIGHT ""
echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
echo
echo " ๐ Node Status"
echo " $NODE_STATUS_ICON Synced"
echo " ๐ Peers: $PEERS"
if [[ "$ALREADY_VALIDATOR" == "yes" ]]; then
 echo " $VALIDATOR_STATUS_ICON Validator Registered"
fi
echo
echo " โ๏ธ Configuration"
# Network/moniker may be empty when the status RPC was unreachable; only
# print lines we actually have data for.
if [[ -n "$NETWORK" ]]; then
 echo " Network: $NETWORK"
fi
if [[ -n "$MONIKER" ]]; then
 echo " Moniker: $MONIKER"
fi
echo " RPC: $RPC_URL"
echo " Home: $HOME_DIR"
echo
echo " ๐ฆ Binaries"
echo " Manager: $MANAGER_BIN ($MANAGER_VER)"
echo " Chain: $PCHAIND_VER"
echo
echo " ๐ก Quick Commands"
echo " โข Check status: push-validator status"
echo " โข View dashboard: push-validator dashboard"
echo " โข View logs: push-validator logs"
echo " โข Stop node: push-validator stop"
echo " โข Restart node: push-validator restart"
if [[ "$ALREADY_VALIDATOR" == "no" ]]; then
 echo " โข Register: push-validator register-validator"
fi
echo " โข All commands: push-validator help"
echo
+
# Final section: in interactive mode, report the registration outcome and
# offer to open the live dashboard; otherwise just report node state.
if [[ "$INTERACTIVE" == "yes" ]]; then
 # Interactive mode: show registration action status and pause before dashboard
 echo
 # Note: "already" (pre-registered) intentionally prints nothing here.
 case "$REGISTRATION_STATUS" in
 success)
 ok "Validator registration completed"
 ;;
 failed)
 warn "Validator registration encountered issues; check logs with: push-validator logs"
 ;;
 skipped)
 warn "Validator registration was skipped"
 echo "Run 'push-validator register-validator' when ready"
 ;;
 esac

 if [[ "$REGISTRATION_STATUS" != "already" ]]; then
 echo
 fi

 # Dashboard prompt with clear instructions and options
 echo
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
 echo "โ DASHBOARD AVAILABLE โ"
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
 echo
 echo " The node is running in the background."
 echo " Press ENTER to open the interactive dashboard (or Ctrl+C to skip)"
 echo " Note: The node will continue running in the background."
 echo
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
 # Read from /dev/tty to work correctly when script is piped (e.g., curl | bash)
 if [[ -e /dev/tty ]]; then
 read -r -p "Press ENTER to continue to the dashboard... " < /dev/tty 2> /dev/tty || {
 echo
 echo " Dashboard skipped. Node is running in background."
 echo
 exit 0
 }
 else
 # Fallback if /dev/tty is not available (shouldn't happen on most systems)
 read -r -p "Press ENTER to continue to the dashboard... " || {
 echo
 echo " Dashboard skipped. Node is running in background."
 echo
 exit 0
 }
 fi

 echo
 # Attach the dashboard directly to the terminal; failures are non-fatal.
 "$MANAGER_BIN" dashboard < /dev/tty > /dev/tty 2>&1 || true

 # After dashboard exit, show clear status and next steps
 echo
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
 if node_running; then
 ok "Dashboard closed. Node is still running in background."
 else
 warn "Node is not running"
 echo " Start it with: push-validator start"
 fi
 echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
else
 # Non-interactive mode: show status
 echo
 if node_running; then
 ok "Node is running in background"
 else
 warn "Node is not running"
 echo "Start it with: push-validator start"
 fi
fi
diff --git a/push-validator-manager/internal/admin/admin.go b/push-validator-manager/internal/admin/admin.go
new file mode 100644
index 00000000..9e8b32e9
--- /dev/null
+++ b/push-validator-manager/internal/admin/admin.go
@@ -0,0 +1,139 @@
+package admin
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
// ResetOptions configures Reset.
type ResetOptions struct {
	HomeDir      string // node home directory (required)
	BinPath      string // pchaind path (kept for API stability; not invoked by Reset)
	KeepAddrBook bool   // preserve config/addrbook.json across the reset
}

// FullResetOptions configures FullReset.
type FullResetOptions struct {
	HomeDir string // node home directory (required)
	BinPath string // pchaind path (kept for API stability)
}

// BackupOptions configures Backup.
type BackupOptions struct {
	HomeDir string // node home directory (required)
	OutDir  string // if empty, defaults to <HomeDir>/backups
}

// Reset clears ALL blockchain data while preserving validator keys and keyring.
// This ensures clean state without AppHash errors while maintaining validator
// identity. Unlike a best-effort wipe, failures to remove or recreate the data
// directories are reported — otherwise a "successful" reset could leave stale
// state behind and reintroduce the very AppHash errors it is meant to fix.
func Reset(opts ResetOptions) error {
	if opts.HomeDir == "" {
		return fmt.Errorf("HomeDir required")
	}

	// Snapshot the address book before wiping; a missing file is fine.
	addrBookPath := filepath.Join(opts.HomeDir, "config", "addrbook.json")
	var addrBookData []byte
	if opts.KeepAddrBook {
		addrBookData, _ = os.ReadFile(addrBookPath)
	}

	// Remove blockchain data (all databases) and logs.
	for _, dir := range []string{"data", "logs"} {
		if err := os.RemoveAll(filepath.Join(opts.HomeDir, dir)); err != nil {
			return fmt.Errorf("removing %s: %w", dir, err)
		}
	}

	// Recreate essential directories.
	for _, dir := range []string{"data", "logs"} {
		if err := os.MkdirAll(filepath.Join(opts.HomeDir, dir), 0o755); err != nil {
			return fmt.Errorf("recreating %s: %w", dir, err)
		}
	}

	// Restore address book if it was backed up.
	if opts.KeepAddrBook && len(addrBookData) > 0 {
		if err := os.WriteFile(addrBookPath, addrBookData, 0o644); err != nil {
			return fmt.Errorf("restoring address book: %w", err)
		}
	}

	return nil
}
+
+// FullReset removes ALL data including validator keys and keyring.
+// WARNING: This is destructive and creates a completely new validator identity.
+func FullReset(opts FullResetOptions) error {
+ if opts.HomeDir == "" { return fmt.Errorf("HomeDir required") }
+ if opts.BinPath == "" { opts.BinPath = "pchaind" }
+
+ // Remove entire data directory (includes all blockchain data)
+ _ = os.RemoveAll(filepath.Join(opts.HomeDir, "data"))
+
+ // Remove keyring (all keys)
+ _ = os.RemoveAll(filepath.Join(opts.HomeDir, "keyring-file"))
+ _ = os.RemoveAll(filepath.Join(opts.HomeDir, "keyring-test"))
+
+ // Remove validator keys
+ _ = os.Remove(filepath.Join(opts.HomeDir, "config", "priv_validator_key.json"))
+ _ = os.Remove(filepath.Join(opts.HomeDir, "config", "node_key.json"))
+
+ // Remove logs
+ _ = os.RemoveAll(filepath.Join(opts.HomeDir, "logs"))
+
+ // Clean address book
+ _ = os.Remove(filepath.Join(opts.HomeDir, "config", "addrbook.json"))
+
+ // Recreate essential directories
+ _ = os.MkdirAll(filepath.Join(opts.HomeDir, "data"), 0o755)
+ _ = os.MkdirAll(filepath.Join(opts.HomeDir, "logs"), 0o755)
+
+ return nil
+}
+
+// Backup creates a tar.gz with critical config files and priv_validator_state.json.
+// Returns the path to the backup file.
+func Backup(opts BackupOptions) (string, error) {
+ if opts.HomeDir == "" { return "", fmt.Errorf("HomeDir required") }
+ outDir := opts.OutDir
+ if outDir == "" { outDir = filepath.Join(opts.HomeDir, "backups") }
+ if err := os.MkdirAll(outDir, 0o755); err != nil { return "", err }
+ ts := time.Now().Format("20060102-150405")
+ outPath := filepath.Join(outDir, fmt.Sprintf("backup-%s.tar.gz", ts))
+ f, err := os.Create(outPath)
+ if err != nil { return "", err }
+ defer f.Close()
+ gz := gzip.NewWriter(f)
+ defer gz.Close()
+ tw := tar.NewWriter(gz)
+ defer tw.Close()
+
+ // Include important paths
+ include := []string{
+ filepath.Join(opts.HomeDir, "config", "config.toml"),
+ filepath.Join(opts.HomeDir, "config", "app.toml"),
+ filepath.Join(opts.HomeDir, "config", "genesis.json"),
+ filepath.Join(opts.HomeDir, "data", "priv_validator_state.json"),
+ }
+ for _, p := range include {
+ if err := addFile(tw, p, opts.HomeDir); err != nil {
+ // Skip missing files silently
+ _ = err
+ }
+ }
+ return outPath, nil
+}
+
// addFile writes the regular file at path into tw, recording its name
// relative to base and slash-separated (the tar format convention, portable
// across platforms). Directories are skipped without error; a missing file
// surfaces as the os.Stat error so callers can decide how to treat it.
func addFile(tw *tar.Writer, path string, base string) error {
	st, err := os.Stat(path)
	if err != nil {
		return err
	}
	if st.IsDir() {
		return nil
	}
	// filepath.Rel handles separators correctly; fall back to a prefix strip
	// only when path is not relative to base at all.
	rel, err := filepath.Rel(base, path)
	if err != nil {
		rel = strings.TrimPrefix(path, base)
		rel = strings.TrimPrefix(rel, string(filepath.Separator))
	}
	hdr, err := tar.FileInfoHeader(st, "")
	if err != nil {
		return err
	}
	hdr.Name = filepath.ToSlash(rel)
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(tw, f)
	return err
}
+
diff --git a/push-validator-manager/internal/bootstrap/bootstrap.go b/push-validator-manager/internal/bootstrap/bootstrap.go
new file mode 100644
index 00000000..086e2112
--- /dev/null
+++ b/push-validator-manager/internal/bootstrap/bootstrap.go
@@ -0,0 +1,353 @@
+package bootstrap
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/files"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/statesync"
+)
+
// Options configures a one-shot node bootstrap (init + state-sync setup).
// HomeDir, ChainID, and GenesisDomain are required; the remaining fields
// have defaults applied by Init.
type Options struct {
	HomeDir string // node home directory (config/, data/, logs/)
	ChainID string // Cosmos chain ID; required
	Moniker string // node moniker; defaults to "push-validator"
	Denom string // e.g., upc
	GenesisDomain string // e.g., rpc-testnet-donut-node1.push.org
	BinPath string // pchaind path
	SnapshotRPCPrimary string // e.g., https://rpc-testnet-donut-node2.push.org
	SnapshotRPCSecondary string // optional; if empty uses primary
	Progress func(string) // optional callback for progress messages
}
+
// Service bootstraps a fresh node: runs `pchaind init`, fetches genesis,
// wires up P2P peers, and enables state sync in the node's config.
type Service interface { Init(ctx context.Context, opts Options) error }

// HTTPDoer matches http.Client's Do.
type HTTPDoer interface { Do(*http.Request) (*http.Response, error) }

// Runner runs commands; used for pchaind calls in init flow.
type Runner interface { Run(ctx context.Context, name string, args ...string) error }

// svc is the default Service implementation; all external effects go through
// the injected dependencies so tests can stub them (see NewWith).
type svc struct {
	http HTTPDoer // HTTP client for genesis/peer discovery requests
	run Runner // command runner for pchaind invocations
	stp statesync.Provider // computes state-sync trust height/hash
}
+
+// New builds a default service with real http client and runner.
+func New() Service {
+ return &svc{http: &http.Client{Timeout: 5 * time.Second}, run: defaultRunner{}, stp: statesync.New()}
+}
+
+// NewWith allows injecting http client, runner, and statesync provider for testing.
+func NewWith(h HTTPDoer, r Runner, p statesync.Provider) Service {
+ if h == nil { h = &http.Client{Timeout: 5 * time.Second} }
+ if r == nil { r = defaultRunner{} }
+ if p == nil { p = statesync.New() }
+ return &svc{http: h, run: r, stp: p}
+}
+
// defaultRunner executes commands via exec.CommandContext, discarding all
// process output; only the exit status is surfaced to the caller.
type defaultRunner struct{}

// Run executes name with args, honoring ctx cancellation.
func (defaultRunner) Run(ctx context.Context, name string, args ...string) error {
	c := exec.CommandContext(ctx, name, args...)
	c.Stdout, c.Stderr = io.Discard, io.Discard
	return c.Run()
}
+
// Init bootstraps a node home directory end to end:
//  1. run `pchaind init` when no config.toml exists,
//  2. fetch genesis from the genesis domain's RPC,
//  3. discover P2P peers (net_info, fallback nodes, snapshot servers),
//  4. enable state sync using trust parameters from the snapshot RPC,
//  5. reset local data so the next start performs a state sync.
// Steps are reported through opts.Progress when provided.
func (s *svc) Init(ctx context.Context, opts Options) error {
	if opts.HomeDir == "" || opts.ChainID == "" {
		return errors.New("HomeDir and ChainID required")
	}
	// Defaults for the optional fields.
	if opts.Moniker == "" { opts.Moniker = "push-validator" }
	if opts.Denom == "" { opts.Denom = "upc" }
	if opts.BinPath == "" { opts.BinPath = "pchaind" }
	if opts.GenesisDomain == "" { return errors.New("GenesisDomain required") }

	progress := opts.Progress
	if progress == nil {
		progress = func(string) {} // no-op if not provided
	}

	// Ensure base dirs
	progress("Setting up node directories...")
	if err := os.MkdirAll(filepath.Join(opts.HomeDir, "config"), 0o755); err != nil { return err }
	if err := os.MkdirAll(filepath.Join(opts.HomeDir, "logs"), 0o755); err != nil { return err }

	// Run `pchaind init` if config is missing
	cfgPath := filepath.Join(opts.HomeDir, "config", "config.toml")
	if _, err := os.Stat(cfgPath); os.IsNotExist(err) {
		progress("Running pchaind init...")
		if err := s.run.Run(ctx, opts.BinPath, "init", opts.Moniker, "--chain-id", opts.ChainID, "--default-denom", opts.Denom, "--home", opts.HomeDir, "--overwrite"); err != nil {
			return fmt.Errorf("pchaind init: %w", err)
		}
		// In test environments where the runner is a noop, ensure the file exists
		if _, err := os.Stat(cfgPath); os.IsNotExist(err) {
			if mkerr := os.MkdirAll(filepath.Dir(cfgPath), 0o755); mkerr == nil {
				_ = os.WriteFile(cfgPath, []byte(""), 0o644)
			}
		}
	}

	// Fetch genesis from remote
	progress("Fetching genesis from network...")
	base := baseURL(opts.GenesisDomain)
	genesisURL := base + "/genesis"
	gen, err := s.getGenesis(ctx, genesisURL)
	if err != nil {
		return fmt.Errorf("fetch genesis: %w", err)
	}
	genPath := filepath.Join(opts.HomeDir, "config", "genesis.json")
	if err := os.WriteFile(genPath, gen, 0o644); err != nil { return err }

	// Build peers from net_info; fallback to known RPC nodes
	progress("Discovering network peers...")
	peers := s.peersFromNetInfo(ctx, base+"/net_info")
	if len(peers) == 0 {
		peers = s.fallbackPeers(ctx, base)
	}

	// CRITICAL: Ensure snapshot RPC server is included as P2P peer for snapshot discovery
	// State sync discovers snapshots via P2P (port 26656), not HTTP
	snapPrimary := opts.SnapshotRPCPrimary
	if snapPrimary == "" { snapPrimary = "https://rpc-testnet-donut-node2.push.org" }

	// Fetch node ID for snapshot server and add to peers
	snapPeers := s.getSnapshotPeers(ctx, []string{snapPrimary})
	peers = append(peers, snapPeers...)

	// Deduplicate peers (keep first occurrence)
	seen := make(map[string]bool)
	uniquePeers := make([]string, 0, len(peers))
	for _, p := range peers {
		if !seen[p] {
			seen[p] = true
			uniquePeers = append(uniquePeers, p)
		}
	}
	peers = uniquePeers

	// Configure peers via config store
	cfgs := files.New(opts.HomeDir)
	if len(peers) > 0 { if err := cfgs.SetPersistentPeers(peers); err != nil { return err } }

	// Write priv_validator_state.json if missing
	pvs := filepath.Join(opts.HomeDir, "data", "priv_validator_state.json")
	if _, err := os.Stat(pvs); os.IsNotExist(err) {
		if err := os.MkdirAll(filepath.Dir(pvs), 0o755); err != nil { return err }
		if err := os.WriteFile(pvs, []byte("{\n \"height\": \"0\",\n \"round\": 0,\n \"step\": 0\n}\n"), 0o644); err != nil { return err }
	}

	// Configure state sync parameters using snapshot RPC (reuse variable from above)
	progress("Configuring state sync parameters...")
	tp, err := s.stp.ComputeTrust(ctx, snapPrimary)
	if err != nil {
		return fmt.Errorf("compute trust params: %w", err)
	}
	// Build and filter RPC servers to those that accept JSON-RPC POST (light client requirement)
	// Add both primary and secondary (fallback to node1 if secondary not provided)
	candidates := []string{hostToStateSyncURL(snapPrimary)}
	snapSecondary := opts.SnapshotRPCSecondary
	if snapSecondary == "" {
		// Default to node1 as secondary witness if not specified
		snapSecondary = "https://rpc-testnet-donut-node1.push.org"
	}
	if snapSecondary != snapPrimary {
		candidates = append(candidates, hostToStateSyncURL(snapSecondary))
	}

	rpcServers := s.pickWorkingRPCs(ctx, candidates)
	if len(rpcServers) == 0 {
		return fmt.Errorf("no working RPC servers for state sync (JSON-RPC POST failed)")
	}
	// Ensure we provide two entries (primary + witness). Only duplicate as last resort.
	if len(rpcServers) == 1 {
		// Try adding the other node as fallback
		fallback := "https://rpc-testnet-donut-node1.push.org:443"
		if strings.Contains(snapPrimary, "node1") {
			fallback = "https://rpc-testnet-donut-node2.push.org:443"
		}
		rpcServers = append(rpcServers, fallback)
	}
	progress("Backing up configuration...")
	// NOTE(review): this condition looks inverted — the success branch is empty
	// and a Backup error is silently dropped either way. Confirm the intent is
	// "best-effort backup, ignore outcome".
	if _, err := cfgs.Backup(); err == nil { /* best-effort */ }
	progress("Enabling state sync...")
	if err := cfgs.EnableStateSync(files.StateSyncParams{
		TrustHeight: tp.Height,
		TrustHash: tp.Hash,
		RPCServers: rpcServers,
		TrustPeriod: "336h0m0s", // 14-day trust window
	}); err != nil {
		return err
	}

	// Clear data for state sync
	progress("Preparing for initial sync...")
	// Best-effort: reset failure is tolerated (e.g. fresh home with no data).
	_ = s.run.Run(ctx, opts.BinPath, "tendermint", "unsafe-reset-all", "--home", opts.HomeDir, "--keep-addr-book")
	// Mark initial state sync flag
	_ = os.WriteFile(filepath.Join(opts.HomeDir, ".initial_state_sync"), []byte(time.Now().Format(time.RFC3339)), 0o644)

	return nil
}
+
+// ---- helpers ----
+
// getGenesis fetches the genesis document from a CometBFT-style `/genesis`
// RPC endpoint and returns the raw JSON found under result.genesis.
func (s *svc) getGenesis(ctx context.Context, url string) ([]byte, error) {
	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	resp, err := s.http.Do(req)
	if err != nil { return nil, err }
	defer resp.Body.Close()
	if resp.StatusCode != 200 { return nil, fmt.Errorf("status %d", resp.StatusCode) }
	// RawMessage defers decoding: the genesis is written to disk verbatim.
	var payload struct{ Result struct{ Genesis json.RawMessage `json:"genesis"` } `json:"result"` }
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return nil, err }
	if len(payload.Result.Genesis) == 0 { return nil, errors.New("empty genesis") }
	return payload.Result.Genesis, nil
}
+
// peersFromNetInfo derives up to four "id@host:26656" peer strings from a
// CometBFT /net_info response, skipping peers that advertise 0.0.0.0 or lack
// an ID/remote IP. Returns nil on any transport or decode error.
func (s *svc) peersFromNetInfo(ctx context.Context, url string) []string {
	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	resp, err := s.http.Do(req)
	// Close the body on the early-exit path; the defer below is only reached
	// on success.
	if err != nil || resp.StatusCode != 200 { if resp != nil { resp.Body.Close() }; return nil }
	defer resp.Body.Close()
	var payload struct {
		Result struct {
			Peers []struct {
				NodeInfo struct {
					ID string `json:"id"`
					ListenAddr string `json:"listen_addr"`
				} `json:"node_info"`
				RemoteIP string `json:"remote_ip"`
			} `json:"peers"`
		} `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return nil }
	out := make([]string, 0, len(payload.Result.Peers))
	for _, p := range payload.Result.Peers {
		// 0.0.0.0 listen addresses are unroutable advertisements; skip them.
		if strings.Contains(p.NodeInfo.ListenAddr, "0.0.0.0") { continue }
		if p.NodeInfo.ID == "" || p.RemoteIP == "" { continue }
		out = append(out, fmt.Sprintf("%s@%s:26656", p.NodeInfo.ID, p.RemoteIP))
		// Cap at four peers — enough for bootstrap.
		if len(out) >= 4 { break }
	}
	return out
}
+
// fallbackPeers builds peer strings when /net_info yielded nothing. If base
// is a full URL (e.g. a local test server) it queries that host's /status for
// its node ID; otherwise it asks the two known testnet RPC nodes.
func (s *svc) fallbackPeers(ctx context.Context, base string) []string {
	// fetchID extracts result.node_info.id from a /status endpoint,
	// returning "" on any transport or decode error.
	fetchID := func(statusURL string) string {
		req, _ := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil)
		resp, err := s.http.Do(req)
		if err != nil || resp.StatusCode != 200 { if resp != nil { resp.Body.Close() }; return "" }
		defer resp.Body.Close()
		var payload struct{ Result struct{ NodeInfo struct{ ID string `json:"id"` } `json:"node_info"` } `json:"result"` }
		if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return "" }
		return payload.Result.NodeInfo.ID
	}
	var out []string
	// When base is a domain-like string, use defaults; if it is a full URL, reuse host
	if strings.HasPrefix(base, "http://") || strings.HasPrefix(base, "https://") {
		u, err := url.Parse(base)
		if err == nil {
			host := u.Host
			if id := fetchID(base + "/status"); id != "" { out = append(out, fmt.Sprintf("%s@%s:26656", id, host)) }
			return out
		}
	}
	if id := fetchID("https://rpc-testnet-donut-node1.push.org/status"); id != "" { out = append(out, fmt.Sprintf("%s@rpc-testnet-donut-node1.push.org:26656", id)) }
	if id := fetchID("https://rpc-testnet-donut-node2.push.org/status"); id != "" { out = append(out, fmt.Sprintf("%s@rpc-testnet-donut-node2.push.org:26656", id)) }
	return out
}
+
// getSnapshotPeers fetches P2P node IDs for snapshot RPC servers and returns
// them as "id@host:26656" peer strings. Servers that fail to parse or respond
// are skipped silently, so the result may be shorter than rpcURLs.
func (s *svc) getSnapshotPeers(ctx context.Context, rpcURLs []string) []string {
	var out []string
	for _, rpcURL := range rpcURLs {
		if rpcURL == "" { continue }
		// Extract host from URL
		u, err := url.Parse(rpcURL)
		if err != nil { continue }
		host := u.Host
		// Fetch node ID from /status
		statusURL := strings.TrimRight(rpcURL, "/") + "/status"
		req, _ := http.NewRequestWithContext(ctx, http.MethodGet, statusURL, nil)
		resp, err := s.http.Do(req)
		if err != nil || resp.StatusCode != 200 { if resp != nil { resp.Body.Close() }; continue }
		var payload struct{ Result struct{ NodeInfo struct{ ID string `json:"id"` } `json:"node_info"` } `json:"result"` }
		// Close explicitly on both decode outcomes (no defer inside the loop).
		if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { resp.Body.Close(); continue }
		resp.Body.Close()
		if payload.Result.NodeInfo.ID != "" {
			out = append(out, fmt.Sprintf("%s@%s:26656", payload.Result.NodeInfo.ID, host))
		}
	}
	return out
}
+
// hostToStateSyncURL normalizes an RPC endpoint for the state-sync
// rpc_servers list: trailing slashes are dropped, the scheme is preserved
// (defaulting to https for bare hosts), and an explicit port is appended
// when none is present (443 for https, 80 for http).
func hostToStateSyncURL(rpc string) string {
	rpc = strings.TrimRight(rpc, "/")
	scheme, host := "https", rpc
	switch {
	case strings.HasPrefix(rpc, "http://"):
		scheme, host = "http", strings.TrimPrefix(rpc, "http://")
	case strings.HasPrefix(rpc, "https://"):
		host = strings.TrimPrefix(rpc, "https://")
	}
	if !strings.Contains(host, ":") {
		if scheme == "http" {
			host += ":80"
		} else {
			host += ":443"
		}
	}
	return scheme + "://" + host
}
+
// baseURL turns a genesis domain (bare host or full URL) into a base URL,
// defaulting to the primary testnet RPC node when the domain is empty.
func baseURL(genesisDomain string) string {
	d := strings.TrimSpace(genesisDomain)
	switch {
	case strings.HasPrefix(d, "http://"), strings.HasPrefix(d, "https://"):
		return d
	case d == "":
		return "https://rpc-testnet-donut-node1.push.org"
	default:
		return "https://" + d
	}
}
+
+// pickWorkingRPCs returns a subset of URLs that respond to a JSON-RPC POST (method=status) within timeout.
+func (s *svc) pickWorkingRPCs(ctx context.Context, urls []string) []string {
+ type req struct {
+ JSONRPC string `json:"jsonrpc"`
+ Method string `json:"method"`
+ ID int `json:"id"`
+ }
+ httpc := &http.Client{Timeout: 6 * time.Second}
+ var out []string
+ for _, u := range urls {
+ // Support bare hosts (e.g., from local tests) by defaulting to http://
+ if !(strings.HasPrefix(u, "http://") || strings.HasPrefix(u, "https://")) {
+ u = "http://" + strings.TrimRight(u, "/")
+ }
+ // Make a shallow copy of ctx with short timeout per probe
+ // attempt twice with short backoff
+ var ok bool
+ for attempt := 0; attempt < 2 && !ok; attempt++ {
+ cctx, cancel := context.WithTimeout(ctx, 6*time.Second)
+ body, _ := json.Marshal(req{JSONRPC: "2.0", Method: "status", ID: 1})
+ rq, _ := http.NewRequestWithContext(cctx, http.MethodPost, u, strings.NewReader(string(body)))
+ rq.Header.Set("Content-Type", "application/json")
+ resp, err := httpc.Do(rq)
+ cancel()
+ if err == nil && resp != nil && resp.StatusCode == 200 {
+ _ = resp.Body.Close()
+ ok = true
+ break
+ }
+ if resp != nil { _ = resp.Body.Close() }
+ time.Sleep(300 * time.Millisecond)
+ }
+ if ok { out = append(out, u) }
+ }
+ return out
+}
diff --git a/push-validator-manager/internal/bootstrap/bootstrap_test.go b/push-validator-manager/internal/bootstrap/bootstrap_test.go
new file mode 100644
index 00000000..c9f79d06
--- /dev/null
+++ b/push-validator-manager/internal/bootstrap/bootstrap_test.go
@@ -0,0 +1,91 @@
+package bootstrap
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/statesync"
+)
+
// fakeRunner just records invocations without executing anything.
// calls holds one entry per Run invocation: the command name followed by its
// arguments, in order.
type fakeRunner struct{ calls [][]string }
// Run records the full command line and always reports success.
func (f *fakeRunner) Run(ctx context.Context, name string, args ...string) error {
	call := append([]string{name}, args...)
	f.calls = append(f.calls, call)
	return nil
}
+
// fakeProvider returns static trust params, so tests can assert the exact
// trust_height/trust_hash written into config.toml.
type fakeProvider struct{}
func (fakeProvider) ComputeTrust(ctx context.Context, rpcURL string) (statesync.TrustParams, error) {
	return statesync.TrustParams{Height: 4000, Hash: "ABC123"}, nil
}
+
// TestBootstrap_Init_FullFlow exercises svc.Init against a local httptest
// server that fakes the genesis/net_info/status RPC endpoints, then asserts
// the expected files and config sections were written to the temp home.
func TestBootstrap_Init_FullFlow(t *testing.T) {
	// Skip if sandbox disallows binding
	if ln, err := net.Listen("tcp", "127.0.0.1:0"); err != nil { t.Skip("binding disabled in sandbox") } else { ln.Close() }

	mux := http.NewServeMux()
	// JSON-RPC POST status handler at root for light client probes
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost {
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","result":{"node_info":{"id":"test"},"sync_info":{"latest_block_height":"5000","catching_up":true}}}`))
			return
		}
		http.NotFound(w, r)
	})
	mux.HandleFunc("/genesis", func(w http.ResponseWriter, r *http.Request) {
		resp := map[string]any{"result": map[string]any{"genesis": map[string]any{"chain_id": "push_42101-1"}}}
		_ = json.NewEncoder(w).Encode(resp)
	})
	// One peer advertises 0.0.0.0 and must be filtered; the other is usable.
	mux.HandleFunc("/net_info", func(w http.ResponseWriter, r *http.Request) {
		resp := map[string]any{"result": map[string]any{"peers": []map[string]any{
			{"node_info": map[string]any{"id": "id1", "listen_addr": "tcp://0.0.0.0:26656"}, "remote_ip": "10.0.0.1"},
			{"node_info": map[string]any{"id": "id2", "listen_addr": "tcp://1.2.3.4:26656"}, "remote_ip": "1.2.3.4"},
		}}}
		_ = json.NewEncoder(w).Encode(resp)
	})
	srv := httptest.NewServer(mux)
	defer srv.Close()

	home := t.TempDir()
	r := &fakeRunner{}
	svc := NewWith(srv.Client(), r, fakeProvider{})
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	err := svc.Init(ctx, Options{
		HomeDir: home,
		ChainID: "push_42101-1",
		Moniker: "testnode",
		GenesisDomain: srv.URL, // full URL supported
		BinPath: "pchaind",
		SnapshotRPCPrimary: srv.URL, // uses fake provider anyway
	})
	if err != nil { t.Fatalf("init error: %v", err) }

	// Verify files written
	if _, err := os.Stat(filepath.Join(home, "config", "genesis.json")); err != nil { t.Fatalf("missing genesis.json: %v", err) }
	b, err := os.ReadFile(filepath.Join(home, "config", "config.toml"))
	if err != nil { t.Fatalf("missing config.toml: %v", err) }
	s := string(b)
	if !containsAll(s, []string{"[p2p]", "persistent_peers", "addr_book_strict = false"}) { t.Fatalf("p2p peers not configured: %s", s) }
	// Values must match what fakeProvider returned.
	if !containsAll(s, []string{"[statesync]", "enable = true", "trust_height = 4000", "trust_hash = \"ABC123\""}) {
		t.Fatalf("statesync not configured: %s", s)
	}
	// Verify runner was invoked for init and unsafe-reset-all
	if len(r.calls) == 0 { t.Fatalf("runner not called") }
}
+
// containsAll reports whether every string in subs occurs within s.
func containsAll(s string, subs []string) bool {
	for _, want := range subs {
		if !strings.Contains(s, want) {
			return false
		}
	}
	return true
}

// contains is a thin alias for strings.Contains, kept for readability.
func contains(s, sub string) bool { return strings.Contains(s, sub) }
diff --git a/push-validator-manager/internal/config/config.go b/push-validator-manager/internal/config/config.go
new file mode 100644
index 00000000..31d33d49
--- /dev/null
+++ b/push-validator-manager/internal/config/config.go
@@ -0,0 +1,43 @@
+package config
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Config holds user/system configuration for the manager.
+// File-backed configuration and env/flag merging will be added.
+type Config struct {
+	ChainID        string // Cosmos chain ID (e.g. push_42101-1)
+	HomeDir        string // node home directory (config/, data/)
+	GenesisDomain  string // genesis source; host name or full URL
+	KeyringBackend string // pchaind keyring backend (e.g. "test")
+	SnapshotRPC    string // RPC endpoint used for state-sync snapshots
+	RPCLocal       string // e.g., http://127.0.0.1:26657
+	Denom          string // staking denom (e.g., upc)
+}
+
+// Defaults sets chain-specific defaults aligned with current scripts.
+// NOTE(review): the os.UserHomeDir error is ignored; if it fails, home
+// is "" and HomeDir becomes the relative path ".pchain" — confirm this
+// fallback is acceptable for headless environments.
+func Defaults() Config {
+	home, _ := os.UserHomeDir()
+	return Config{
+		ChainID:        "push_42101-1",
+		HomeDir:        filepath.Join(home, ".pchain"),
+		GenesisDomain:  "rpc-testnet-donut-node1.push.org",
+		KeyringBackend: "test",
+		SnapshotRPC:    "https://rpc-testnet-donut-node2.push.org",
+		RPCLocal:       "http://127.0.0.1:26657",
+		Denom:          "upc",
+	}
+}
+
+// Load returns the default configuration, honoring a HOME_DIR
+// environment override for the node home directory. All other
+// settings are expected to come from flags.
+func Load() Config {
+	cfg := Defaults()
+	// HOME_DIR is the only supported env override (XDG_*-style pattern).
+	if override := os.Getenv("HOME_DIR"); override != "" {
+		cfg.HomeDir = override
+	}
+	return cfg
+}
diff --git a/push-validator-manager/internal/dashboard/chain_status.go b/push-validator-manager/internal/dashboard/chain_status.go
new file mode 100644
index 00000000..71cd12b9
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/chain_status.go
@@ -0,0 +1,211 @@
+package dashboard
+
+import (
+	"fmt"
+	"strings"
+
+	tea "github.com/charmbracelet/bubbletea"
+	"github.com/charmbracelet/lipgloss"
+)
+
+// ChainStatus component shows chain sync status
+type ChainStatus struct {
+	BaseComponent
+	data    DashboardData  // latest snapshot received via Update
+	icons   Icons          // emoji or ASCII icon set, per noEmoji
+	etaCalc *ETACalculator // rolling estimator fed with blocks-behind samples
+	noEmoji bool           // render plain-text labels instead of emoji
+}
+
+// NewChainStatus builds a chain-status panel; noEmoji selects
+// plain-text labels instead of emoji glyphs.
+func NewChainStatus(noEmoji bool) *ChainStatus {
+	c := &ChainStatus{noEmoji: noEmoji}
+	c.icons = NewIcons(noEmoji)
+	c.etaCalc = NewETACalculator()
+	return c
+}
+
+// ID returns component identifier
+func (c *ChainStatus) ID() string {
+ return "chain_status"
+}
+
+// Title returns the human-readable panel title.
+func (c *ChainStatus) Title() string { return "Chain Status" }
+
+// MinWidth returns the minimum terminal columns this panel needs.
+func (c *ChainStatus) MinWidth() int { return 30 }
+
+// MinHeight returns the minimum terminal rows this panel needs.
+func (c *ChainStatus) MinHeight() int { return 10 }
+
+// Update stores the latest dashboard snapshot and feeds the ETA
+// calculator with the current lag whenever the node is behind the
+// remote chain head.
+func (c *ChainStatus) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
+	c.data = data
+
+	local := data.Metrics.Chain.LocalHeight
+	remote := data.Metrics.Chain.RemoteHeight
+	if remote > local {
+		c.etaCalc.AddSample(remote - local)
+	}
+
+	return c, nil
+}
+
+// View renders the component with caching.
+// The plain-text content is computed first and used (together with the
+// requested dimensions) as the memoization key; only on a cache miss is
+// the lipgloss border/padding styling re-applied.
+func (c *ChainStatus) View(w, h int) string {
+	// Render with styling
+	style := lipgloss.NewStyle().
+		Border(lipgloss.RoundedBorder()).
+		BorderForeground(lipgloss.Color("63")).
+		Padding(0, 1)
+
+	content := c.renderContent(w)
+
+	// Check cache (keyed on content AND w/h so a resize invalidates it)
+	if c.CheckCacheWithSize(content, w, h) {
+		return c.GetCached()
+	}
+
+	// Clamp negative dimensions from pathological layouts.
+	if w < 0 {
+		w = 0
+	}
+	if h < 0 {
+		h = 0
+	}
+
+	// Account for border width (2 chars: left + right) to prevent overflow
+	borderWidth := 2
+	contentWidth := w - borderWidth
+	if contentWidth < 0 {
+		contentWidth = 0
+	}
+
+	rendered := style.Width(contentWidth).Render(content)
+	c.UpdateCache(rendered)
+	return rendered
+}
+
+// renderContent builds plain text content: a centered title followed by
+// either an "Unknown" placeholder (node down / RPC unreachable) or a
+// sync progress line with an ETA suffix.
+func (c *ChainStatus) renderContent(w int) string {
+	var lines []string
+
+	// Interior width after accounting for rounded border (2 chars) and padding (2 chars).
+	inner := w - 4
+	if inner < 0 {
+		inner = 0
+	}
+
+	localHeight := c.data.Metrics.Chain.LocalHeight
+	remoteHeight := c.data.Metrics.Chain.RemoteHeight
+
+	// Check if node is running and RPC is available
+	if !c.data.NodeInfo.Running || !c.data.Metrics.Node.RPCListening {
+		lines = append(lines, fmt.Sprintf("%s Unknown", c.icons.Err))
+		// Show "local/remote" when the remote height is known, else just local.
+		if remoteHeight > 0 {
+			lines = append(lines, fmt.Sprintf("%s/%s", formatWithCommas(localHeight), formatWithCommas(remoteHeight)))
+		} else {
+			lines = append(lines, fmt.Sprintf("Height: %s", formatWithCommas(localHeight)))
+		}
+	} else {
+		// Always show sync-monitor-style progress bar
+		isCatchingUp := c.data.Metrics.Chain.CatchingUp
+		syncLine := renderSyncProgress(localHeight, remoteHeight, c.noEmoji, isCatchingUp)
+
+		// Add ETA: calculated when syncing, "0s" when in sync.
+		// "calculating..." is suppressed rather than shown.
+		if isCatchingUp && remoteHeight > localHeight {
+			eta := c.etaCalc.Calculate()
+			if eta != "" && eta != "calculating..." {
+				syncLine += " | ETA: " + eta
+			}
+		} else if remoteHeight > 0 {
+			syncLine += " | ETA: 0s"
+		}
+
+		lines = append(lines, syncLine)
+	}
+
+	// Use inner width for title centering
+	return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
+}
+
+// renderSyncProgress creates sync-monitor-style progress line
+func renderSyncProgress(local, remote int64, noEmoji bool, isCatchingUp bool) string {
+ if remote <= 0 {
+ return ""
+ }
+
+ percent := float64(local) / float64(remote) * 100
+ if percent < 0 {
+ percent = 0
+ }
+ if percent > 100 {
+ percent = 100
+ }
+
+ width := 28
+ filled := int(percent / 100 * float64(width))
+ if filled < 0 {
+ filled = 0
+ }
+ if filled > width {
+ filled = width
+ }
+
+ bar := fmt.Sprintf("%s%s",
+ lipgloss.NewStyle().Foreground(lipgloss.Color("10")).Render(fmt.Sprintf("%s", repeatStr("โ", filled))),
+ lipgloss.NewStyle().Foreground(lipgloss.Color("240")).Render(fmt.Sprintf("%s", repeatStr("โ", width-filled))))
+
+ // Use different label based on sync state
+ icon := "๐ Syncing"
+ if !isCatchingUp {
+ icon = "๐ In Sync"
+ }
+ if noEmoji {
+ if isCatchingUp {
+ icon = "Syncing"
+ } else {
+ icon = "In Sync"
+ }
+ }
+
+ return fmt.Sprintf("%s [%s] %.2f%% | %s/%s blocks",
+ icon, bar, percent,
+ formatWithCommas(local),
+ formatWithCommas(remote))
+}
+
+// formatWithCommas adds comma thousands separators (1234567 -> "1,234,567").
+// Negative values keep their sign and have the absolute value grouped;
+// the previous implementation returned negatives ungrouped because the
+// n < 1000 fast path also matched every negative number. Working on the
+// decimal string (rather than negating n) stays safe for math.MinInt64.
+func formatWithCommas(n int64) string {
+	s := fmt.Sprintf("%d", n)
+	neg := false
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+	if len(s) > 3 {
+		var b strings.Builder
+		b.Grow(len(s) + (len(s)-1)/3)
+		for i := 0; i < len(s); i++ {
+			if i > 0 && (len(s)-i)%3 == 0 {
+				b.WriteByte(',')
+			}
+			b.WriteByte(s[i])
+		}
+		s = b.String()
+	}
+	if neg {
+		return "-" + s
+	}
+	return s
+}
+
+// repeatStr repeats a string n times
+func repeatStr(s string, n int) string {
+ var result string
+ for i := 0; i < n; i++ {
+ result += s
+ }
+ return result
+}
+
diff --git a/push-validator-manager/internal/dashboard/component.go b/push-validator-manager/internal/dashboard/component.go
new file mode 100644
index 00000000..0164b6c5
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/component.go
@@ -0,0 +1,150 @@
+package dashboard
+
+import (
+ "fmt"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/cespare/xxhash/v2"
+)
+
+// Component interface - all dashboard panels implement this
+type Component interface {
+	// Bubbletea lifecycle
+	Init() tea.Cmd
+	// Update receives every message plus the latest fetched snapshot and
+	// returns the (possibly replaced) component and an optional command.
+	Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd)
+	// View renders the panel into the given cell dimensions.
+	View(width, height int) string
+
+	// Metadata
+	ID() string
+	Title() string
+	MinWidth() int  // Minimum width required
+	MinHeight() int // Minimum height required
+}
+
+// BaseComponent provides common functionality for all components
+// Includes hash-based caching to prevent unnecessary re-renders
+type BaseComponent struct {
+	id    string // returned by ID(); embedders may shadow the accessor instead
+	title string // returned by Title()
+	minW  int    // returned by MinWidth()
+	minH  int    // returned by MinHeight()
+
+	// Performance optimization - cache rendered output.
+	// lastHash is the xxhash of the last content (or content+size) key;
+	// cached is the styled render associated with that hash.
+	lastHash uint64
+	cached   string
+}
+
+// ID returns the component identifier stored on the base.
+func (c *BaseComponent) ID() string { return c.id }
+
+// Title returns the component title stored on the base.
+func (c *BaseComponent) Title() string { return c.title }
+
+// MinWidth returns the minimum width recorded on the base.
+func (c *BaseComponent) MinWidth() int { return c.minW }
+
+// MinHeight returns the minimum height recorded on the base.
+func (c *BaseComponent) MinHeight() int { return c.minH }
+
+// Init is a no-op default so embedders only override it when needed.
+func (c *BaseComponent) Init() tea.Cmd { return nil }
+
+// CheckCache reports whether the cached render can be reused for
+// content. A hit requires both an unchanged xxhash and a non-empty
+// cache; on a miss the new hash is recorded for the next call.
+func (c *BaseComponent) CheckCache(content string) bool {
+	sum := xxhash.Sum64String(content)
+	if sum == c.lastHash && c.cached != "" {
+		return true
+	}
+	c.lastHash = sum
+	return false
+}
+
+// cacheKey hashes content together with the target dimensions so that
+// a resize invalidates the cache even when the text is unchanged.
+func (c *BaseComponent) cacheKey(content string, w, h int) uint64 {
+	keyed := fmt.Sprintf("%dx%d|%s", w, h, content)
+	return xxhash.Sum64String(keyed)
+}
+
+// CheckCacheWithSize reports whether the cached render can be reused
+// for this content at these dimensions; on a miss the new combined
+// hash is recorded for the next call.
+func (c *BaseComponent) CheckCacheWithSize(content string, w, h int) bool {
+	sum := c.cacheKey(content, w, h)
+	if sum == c.lastHash && c.cached != "" {
+		return true
+	}
+	c.lastHash = sum
+	return false
+}
+
+// UpdateCache stores the styled render for reuse on the next cache hit.
+func (c *BaseComponent) UpdateCache(rendered string) { c.cached = rendered }
+
+// GetCached returns the most recently cached render ("" if none).
+func (c *BaseComponent) GetCached() string { return c.cached }
+
+// ComponentRegistry manages collection of dashboard components
+// Maintains deterministic registration order for consistent rendering
+type ComponentRegistry struct {
+	order      []string             // Ordered list of component IDs (first-registration order)
+	components map[string]Component // ID -> Component lookup
+}
+
+// NewComponentRegistry returns an empty registry ready for Register calls.
+func NewComponentRegistry() *ComponentRegistry {
+	return &ComponentRegistry{
+		order:      []string{},
+		components: map[string]Component{},
+	}
+}
+
+// Register adds comp, preserving first-registration order.
+// Re-registering an existing ID replaces the component in place
+// without changing its position.
+func (r *ComponentRegistry) Register(comp Component) {
+	id := comp.ID()
+	if _, seen := r.components[id]; !seen {
+		r.order = append(r.order, id)
+	}
+	r.components[id] = comp
+}
+
+// Get retrieves a component by ID (nil when unregistered).
+func (r *ComponentRegistry) Get(id string) Component { return r.components[id] }
+
+// All returns the registered components in registration order.
+func (r *ComponentRegistry) All() []Component {
+	out := make([]Component, len(r.order))
+	for i, id := range r.order {
+		out[i] = r.components[id]
+	}
+	return out
+}
+
+// UpdateAll forwards msg and data to every component in registration
+// order, storing any replacement components and collecting the non-nil
+// commands they return.
+func (r *ComponentRegistry) UpdateAll(msg tea.Msg, data DashboardData) []tea.Cmd {
+	cmds := make([]tea.Cmd, 0, len(r.order))
+	for _, id := range r.order {
+		next, cmd := r.components[id].Update(msg, data)
+		r.components[id] = next
+		if cmd != nil {
+			cmds = append(cmds, cmd)
+		}
+	}
+	return cmds
+}
diff --git a/push-validator-manager/internal/dashboard/dashboard.go b/push-validator-manager/internal/dashboard/dashboard.go
new file mode 100644
index 00000000..23affc99
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/dashboard.go
@@ -0,0 +1,769 @@
+package dashboard
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/charmbracelet/bubbles/help"
+ "github.com/charmbracelet/bubbles/key"
+ "github.com/charmbracelet/bubbles/spinner"
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/metrics"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/process"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
+
+// keyMap defines keyboard shortcuts.
+// Up/Down and Search/Follow/Home/End are forwarded to the log viewer;
+// Left/Right page the validators list (see handleKey).
+type keyMap struct {
+	Quit    key.Binding
+	Refresh key.Binding
+	Help    key.Binding
+	Up      key.Binding
+	Down    key.Binding
+	Left    key.Binding
+	Right   key.Binding
+	Search  key.Binding
+	Follow  key.Binding
+	Home    key.Binding
+	End     key.Binding
+}
+
+// ShortHelp implements help.KeyMap: the condensed inline hint line.
+func (k keyMap) ShortHelp() []key.Binding {
+	essentials := []key.Binding{k.Quit, k.Refresh, k.Help}
+	return essentials
+}
+
+// FullHelp implements help.KeyMap: binding rows for the full overlay.
+func (k keyMap) FullHelp() [][]key.Binding {
+	general := []key.Binding{k.Quit, k.Refresh, k.Help}
+	movement := []key.Binding{k.Up, k.Down, k.Left, k.Right}
+	logs := []key.Binding{k.Search, k.Follow, k.Home, k.End}
+	return [][]key.Binding{general, movement, logs}
+}
+
+// newKeyMap creates default key bindings.
+// Note: "t"/"l" (not Home/End keys) jump to oldest/latest log lines,
+// and "p"/"n" mirror the arrow keys for validator paging.
+func newKeyMap() keyMap {
+	return keyMap{
+		Quit: key.NewBinding(
+			key.WithKeys("q", "ctrl+c"),
+			key.WithHelp("q", "quit"),
+		),
+		Refresh: key.NewBinding(
+			key.WithKeys("r"),
+			key.WithHelp("r", "refresh now"),
+		),
+		Help: key.NewBinding(
+			key.WithKeys("h"),
+			key.WithHelp("h", "toggle help"),
+		),
+		Up: key.NewBinding(
+			key.WithKeys("up"),
+			key.WithHelp("↑", "scroll up logs"),
+		),
+		Down: key.NewBinding(
+			key.WithKeys("down"),
+			key.WithHelp("↓", "scroll down logs"),
+		),
+		Left: key.NewBinding(
+			key.WithKeys("left", "p"),
+			key.WithHelp("←/p", "prev page validators"),
+		),
+		Right: key.NewBinding(
+			key.WithKeys("right", "n"),
+			key.WithHelp("→/n", "next page validators"),
+		),
+		Search: key.NewBinding(
+			key.WithKeys("/"),
+			key.WithHelp("/", "search logs"),
+		),
+		Follow: key.NewBinding(
+			key.WithKeys("f"),
+			key.WithHelp("f", "toggle follow mode"),
+		),
+		Home: key.NewBinding(
+			key.WithKeys("t"),
+			key.WithHelp("t", "jump to oldest"),
+		),
+		End: key.NewBinding(
+			key.WithKeys("l"),
+			key.WithHelp("l", "jump to latest"),
+		),
+	}
+}
+
+// tickCmd schedules a single tickMsg to fire after interval.
+func tickCmd(interval time.Duration) tea.Cmd {
+	return tea.Tick(interval, func(t time.Time) tea.Msg { return tickMsg(t) })
+}
+
+// Dashboard is the main Bubble Tea Model
+type Dashboard struct {
+	opts     Options            // immutable run options (refresh interval, RPC timeout, config)
+	data     DashboardData      // last successful snapshot, kept on fetch errors
+	lastOK   time.Time          // time of the last successful fetch (staleness baseline)
+	err      error              // last fetch error, nil after a success
+	stale    bool               // true once errors persist past the staleness window
+	registry *ComponentRegistry // panels in render order
+	layout   *Layout            // grid layout over the registry
+	keys     keyMap
+	help     help.Model
+	spinner  spinner.Model
+	width    int // set by tea.WindowSizeMsg; 0 until first resize
+	height   int
+	showHelp bool
+	loading  bool // true until the first fetch resolves
+
+	// Context for cancelling in-flight fetches.
+	// Only assigned/cleared on the UI goroutine (see fetchStartedMsg).
+	fetchCancel context.CancelFunc
+
+	// Persistent metrics collector for CPU monitoring
+	collector *metrics.Collector
+
+	// Caching for expensive operations (pchaind version subprocess)
+	cachedVersion   string
+	cachedVersionAt time.Time
+	cachedVersionPID int
+}
+
+// New creates a new Dashboard instance
+func New(opts Options) *Dashboard {
+	// Apply sensible defaults to prevent zero-value bugs
+	if opts.RefreshInterval <= 0 {
+		opts.RefreshInterval = 2 * time.Second
+	}
+	if opts.RPCTimeout <= 0 {
+		// Default timeout is min(15s, 2*refresh interval) so a slow RPC
+		// never blocks more than two refresh cycles.
+		rt := 15 * time.Second
+		if 2*opts.RefreshInterval < rt {
+			rt = 2 * opts.RefreshInterval
+		}
+		opts.RPCTimeout = rt
+	}
+
+	// Initialize component registry (registration order is render order)
+	registry := NewComponentRegistry()
+	registry.Register(NewHeader())
+	registry.Register(NewNodeStatus(opts.NoEmoji))
+	registry.Register(NewChainStatus(opts.NoEmoji))
+	registry.Register(NewNetworkStatus(opts.NoEmoji))
+	registry.Register(NewValidatorsList(opts.NoEmoji, opts.Config))
+	registry.Register(NewValidatorInfo(opts.NoEmoji))
+	registry.Register(NewLogViewer(opts.NoEmoji, opts.Config.HomeDir))
+
+	// Configure layout; component IDs here must match the ID() values
+	// of the registered components above.
+	layoutConfig := LayoutConfig{
+		Rows: []LayoutRow{
+			{Components: []string{"header"}, Weights: []int{100}, MinHeight: 4},
+			{Components: []string{"node_status", "chain_status"}, Weights: []int{50, 50}, MinHeight: 10},
+			{Components: []string{"network_status", "validator_info"}, Weights: []int{50, 50}, MinHeight: 10},
+			{Components: []string{"validators_list"}, Weights: []int{100}, MinHeight: 16},
+			{Components: []string{"log_viewer"}, Weights: []int{100}, MinHeight: 12},
+		},
+	}
+	layout := NewLayout(layoutConfig, registry)
+
+	// Initialize spinner
+	s := spinner.New()
+	s.Spinner = spinner.Dot
+	s.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("205"))
+
+	return &Dashboard{
+		opts:      opts,
+		registry:  registry,
+		layout:    layout,
+		keys:      newKeyMap(),
+		help:      help.New(),
+		spinner:   s,
+		loading:   true,
+		showHelp:  false,
+		collector: metrics.New(), // Initialize persistent collector for continuous CPU monitoring
+	}
+}
+
+// Init starts the spinner, kicks off the first data fetch, and arms
+// the periodic refresh ticker (Bubble Tea lifecycle entry point).
+func (m *Dashboard) Init() tea.Cmd {
+	startup := []tea.Cmd{
+		m.spinner.Tick,
+		m.fetchCmd(),
+		tickCmd(m.opts.RefreshInterval),
+	}
+	return tea.Batch(startup...)
+}
+
+// Update handles messages (Bubble Tea lifecycle).
+// Invariants: fetchCancel is only touched here (the UI goroutine), and
+// only tickMsg re-arms the ticker, so exactly one ticker chain exists.
+func (m *Dashboard) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		return m.handleKey(msg)
+
+	case tea.WindowSizeMsg:
+		m.width, m.height = msg.Width, msg.Height
+		return m, nil
+
+	case fetchStartedMsg:
+		// SAFE: assign cancel func on UI thread (not in Cmd goroutine)
+		if m.fetchCancel != nil {
+			m.fetchCancel() // Cancel any previous fetch
+		}
+		m.fetchCancel = msg.cancel
+		return m, nil
+
+	case tickMsg:
+		// CRITICAL: Only tickMsg schedules next tick (prevents double ticker)
+		// IMPORTANT: Only fetch if no fetch is currently in progress
+		// Otherwise the new fetch will cancel the previous one
+		cmds := []tea.Cmd{tickCmd(m.opts.RefreshInterval)}
+		if m.fetchCancel == nil {
+			// No fetch in progress, safe to start a new one
+			cmds = append(cmds, m.fetchCmd())
+		}
+		return m, tea.Batch(cmds...)
+
+	case dataMsg:
+		// Successful fetch - update data and clear error
+		m.data = DashboardData(msg)
+		m.lastOK = time.Now()
+		m.err = nil
+		m.stale = false
+		m.loading = false
+		m.fetchCancel = nil // Clear cancel to allow next fetch
+		// Update components
+		cmds := m.registry.UpdateAll(msg, m.data)
+		return m, tea.Batch(cmds...)
+
+	case dataErrMsg:
+		// Failed fetch - keep old data, show error, mark stale
+		// (stale only after 10s without a successful fetch)
+		m.err = msg.err
+		m.data.Err = msg.err // Set error in data so Header can display it
+		m.stale = time.Since(m.lastOK) > 10*time.Second
+		m.loading = false
+		m.fetchCancel = nil // Clear cancel to allow next fetch
+		// Update components to propagate error to Header
+		cmds := m.registry.UpdateAll(msg, m.data)
+		return m, tea.Batch(cmds...)
+
+	case forceRefreshMsg:
+		// User pressed 'r' - start new fetch immediately
+		return m, m.fetchCmd()
+
+	case toggleHelpMsg:
+		m.showHelp = !m.showHelp
+		return m, nil
+
+	case spinner.TickMsg:
+		var cmd tea.Cmd
+		m.spinner, cmd = m.spinner.Update(msg)
+		return m, cmd
+	}
+
+	return m, nil
+}
+
+// View renders the dashboard (Bubble Tea lifecycle).
+// Render order: loading box -> help overlay -> computed grid layout
+// (cells grouped by Y, sorted by X) -> optional layout warning -> footer.
+func (m *Dashboard) View() string {
+	// Add recovery for View method panics
+	defer func() {
+		if r := recover(); r != nil {
+			if m.opts.Debug {
+				fmt.Fprintf(os.Stderr, "Debug: View() panic recovered: %v\n", r)
+			}
+		}
+	}()
+
+	// Guard against zero-size render before first WindowSizeMsg
+	if m.width <= 0 || m.height <= 1 {
+		return ""
+	}
+
+	// Safety check for nil pointers
+	if m.registry == nil || m.layout == nil {
+		if m.opts.Debug {
+			fmt.Fprintf(os.Stderr, "Debug: Registry or layout is nil\n")
+		}
+		return "Initializing dashboard..."
+	}
+
+	if m.loading {
+		// Create bold, styled loading message with larger text
+		spinnerStyle := lipgloss.NewStyle().
+			Foreground(lipgloss.Color("205")).
+			Bold(true)
+
+		messageStyle := lipgloss.NewStyle().
+			Foreground(lipgloss.Color("39")).
+			Bold(true)
+
+		loadingBox := lipgloss.NewStyle().
+			Border(lipgloss.RoundedBorder()).
+			BorderForeground(lipgloss.Color("205")).
+			Padding(2, 4).
+			MarginTop(1).
+			Align(lipgloss.Center)
+
+		// Build the loading message with multiple lines
+		// (local `spinner` shadows the imported spinner package here;
+		// harmless since the package is not referenced below)
+		spinner := spinnerStyle.Render(m.spinner.View())
+		message := messageStyle.Render("CONNECTING TO RPC")
+		subtext := lipgloss.NewStyle().
+			Foreground(lipgloss.Color("241")).
+			Render("Initializing dashboard...")
+
+		content := lipgloss.JoinVertical(lipgloss.Center,
+			spinner,
+			message,
+			"",
+			subtext,
+		)
+
+		styledBox := loadingBox.Render(content)
+
+		return lipgloss.Place(
+			m.width, m.height,
+			lipgloss.Center, lipgloss.Center,
+			styledBox,
+		)
+	}
+
+	if m.showHelp {
+		// Overlay command help with enhanced styling
+		helpView := getCommandHelpText()
+		return lipgloss.Place(
+			m.width, m.height,
+			lipgloss.Center, lipgloss.Center,
+			lipgloss.NewStyle().
+				Border(lipgloss.RoundedBorder()).
+				BorderForeground(lipgloss.Color("63")).
+				Padding(1, 2).
+				Render(helpView),
+		)
+	}
+
+	// DON'T reserve space for spacer - use full height
+	result := m.layout.Compute(m.width, m.height)
+
+	// Build rowMap with ALL cells (including header)
+	rowMap := make(map[int][]Cell)
+	for _, cell := range result.Cells {
+		rowMap[cell.Y] = append(rowMap[cell.Y], cell)
+	}
+
+	// Sort Y coordinates (map iteration order is random)
+	ys := make([]int, 0, len(rowMap))
+	for y := range rowMap {
+		ys = append(ys, y)
+	}
+	sort.Ints(ys)
+
+	// Render all rows in order
+	var rows []string
+	for _, y := range ys {
+		cells := rowMap[y]
+		sort.Slice(cells, func(i, j int) bool { return cells[i].X < cells[j].X })
+
+		var rowCells []string
+		for _, cell := range cells {
+			if comp := m.registry.Get(cell.ID); comp != nil {
+				s := comp.View(cell.W, cell.H)
+				rowCells = append(rowCells, s)
+			}
+		}
+
+		if len(rowCells) > 0 {
+			joined := lipgloss.JoinHorizontal(lipgloss.Top, rowCells...)
+			rows = append(rows, joined)
+		}
+	}
+
+	// Join all rows WITHOUT any spacer
+	output := lipgloss.JoinVertical(lipgloss.Left, rows...)
+
+	// Show layout warning if present
+	if result.Warning != "" {
+		output += fmt.Sprintf("\nโ %s\n", result.Warning)
+	}
+
+	// Add footer with highlighted controls and commands
+	keyStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("39")).
+		Bold(true)
+	textStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("241"))
+	cmdStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("10"))
+
+	// Line 1: Dashboard controls
+	controlsLine := textStyle.Render("Controls: ") +
+		keyStyle.Render("h") +
+		textStyle.Render(" for help | ") +
+		keyStyle.Render("Ctrl+C") +
+		textStyle.Render(" to exit")
+
+	// Line 2: Quick CLI commands
+	commandsLine := textStyle.Render("Quick Commands: ") +
+		cmdStyle.Render("push-validator status") +
+		textStyle.Render(" | ") +
+		cmdStyle.Render("push-validator start") +
+		textStyle.Render(" | ") +
+		cmdStyle.Render("push-validator stop") +
+		textStyle.Render(" | ") +
+		cmdStyle.Render("push-validator dashboard") +
+		textStyle.Render(" | ") +
+		cmdStyle.Render("push-validator help")
+
+	footer := lipgloss.JoinVertical(lipgloss.Left, controlsLine, commandsLine)
+	output = lipgloss.JoinVertical(lipgloss.Left, output, footer)
+
+	return output
+}
+
+// getCommandHelpText returns formatted help text showing all available commands with styling.
+// Column alignment is done with hand-counted strings.Repeat(" ", n) pads;
+// contentWidth assumes the ASCII title (len == display width).
+func getCommandHelpText() string {
+	// Define color styles
+	titleStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("39")).
+		Bold(true)
+
+	sectionStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("39")).
+		Bold(true)
+
+	commandStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("10")).
+		Bold(true)
+
+	descStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("250"))
+
+	separatorStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("240"))
+
+	footerStyle := lipgloss.NewStyle().
+		Foreground(lipgloss.Color("226")).
+		Bold(true)
+
+	var help strings.Builder
+
+	// Title - properly center aligned with full-width separator
+	titleText := "Push Validator Manager"
+	contentWidth := 90
+	titleWidth := len(titleText)
+	titlePadding := strings.Repeat(" ", (contentWidth-titleWidth)/2)
+	help.WriteString(titlePadding + titleStyle.Render(titleText) + "\n")
+	help.WriteString(separatorStyle.Render(strings.Repeat("โ", contentWidth)) + "\n\n")
+
+	// USAGE
+	help.WriteString(sectionStyle.Render("USAGE") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator") + " " + descStyle.Render(" [flags]") + "\n\n")
+
+	// Quick Start
+	help.WriteString(sectionStyle.Render("Quick Start") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator start") + strings.Repeat(" ", 14) + descStyle.Render("Start the node process") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator status") + strings.Repeat(" ", 13) + descStyle.Render("Show node/rpc/sync status") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator dashboard") + strings.Repeat(" ", 10) + descStyle.Render("Live dashboard with metrics") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator sync") + strings.Repeat(" ", 15) + descStyle.Render("Monitor sync progress live") + "\n\n")
+
+	// Operations
+	help.WriteString(sectionStyle.Render("Operations") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator stop") + strings.Repeat(" ", 15) + descStyle.Render("Stop the node process") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator restart") + strings.Repeat(" ", 12) + descStyle.Render("Restart the node process") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator logs") + strings.Repeat(" ", 15) + descStyle.Render("Tail node logs") + "\n\n")
+
+	// Validator
+	help.WriteString(sectionStyle.Render("Validator") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator validators") + strings.Repeat(" ", 9) + descStyle.Render("List validators (--output json)") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator balance [addr]") + strings.Repeat(" ", 5) + descStyle.Render("Check account balance") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator register-validator") + " " + descStyle.Render("Register this node as validator") + "\n\n")
+
+	// Maintenance
+	help.WriteString(sectionStyle.Render("Maintenance") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator backup") + strings.Repeat(" ", 13) + descStyle.Render("Create config/state backup") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator reset") + strings.Repeat(" ", 14) + descStyle.Render("Reset chain data (keeps addr book)") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator full-reset") + strings.Repeat(" ", 9) + descStyle.Render("Complete reset (deletes ALL)") + "\n\n")
+
+	// Utilities
+	help.WriteString(sectionStyle.Render("Utilities") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator doctor") + strings.Repeat(" ", 13) + descStyle.Render("Run diagnostic checks") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator peers") + strings.Repeat(" ", 14) + descStyle.Render("List connected peers") + "\n")
+	help.WriteString("  " + commandStyle.Render("push-validator version") + strings.Repeat(" ", 12) + descStyle.Render("Show version information") + "\n\n")
+
+	// Footer
+	help.WriteString(footerStyle.Render("Press 'q', 'h', or 'esc' to close help"))
+
+	return help.String()
+}
+
+// handleKey processes keyboard input.
+// Priority: help-overlay close keys first, then global bindings, then
+// navigation keys forwarded to components, then raw rune/edit keys
+// forwarded so the log-search input can receive typed characters.
+func (m *Dashboard) handleKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
+	// If help is showing, allow closing it with q, h, or esc
+	if m.showHelp {
+		switch msg.String() {
+		case "q", "h", "esc":
+			return m, func() tea.Msg { return toggleHelpMsg{} }
+		}
+		// Ignore other keys while help is showing
+		return m, nil
+	}
+
+	switch {
+	case key.Matches(msg, m.keys.Quit):
+		if m.fetchCancel != nil {
+			m.fetchCancel() // Cancel in-flight fetch
+		}
+		return m, tea.Quit
+
+	case key.Matches(msg, m.keys.Refresh):
+		return m, func() tea.Msg { return forceRefreshMsg{} }
+
+	case key.Matches(msg, m.keys.Help):
+		return m, func() tea.Msg { return toggleHelpMsg{} }
+
+	case key.Matches(msg, m.keys.Up), key.Matches(msg, m.keys.Down),
+		key.Matches(msg, m.keys.Left), key.Matches(msg, m.keys.Right),
+		key.Matches(msg, m.keys.Search), key.Matches(msg, m.keys.Follow),
+		key.Matches(msg, m.keys.Home), key.Matches(msg, m.keys.End):
+		// Forward to components (log viewer and validators list)
+		cmds := m.registry.UpdateAll(msg, m.data)
+		return m, tea.Batch(cmds...)
+	}
+
+	// Also forward other keys to components (for search input)
+	if msg.Type == tea.KeyRunes || msg.Type == tea.KeyBackspace || msg.Type == tea.KeyEscape || msg.Type == tea.KeyEnter {
+		cmds := m.registry.UpdateAll(msg, m.data)
+		return m, tea.Batch(cmds...)
+	}
+
+	return m, nil
+}
+
+// fetchCmd returns a Cmd that fetches data asynchronously.
+// The cancel func is handed to the UI goroutine via fetchStartedMsg
+// (tea.Sequence guarantees it is delivered before the fetch result),
+// and the fetch goroutine itself defers cancel to release the context.
+func (m *Dashboard) fetchCmd() tea.Cmd {
+	// Use configurable RPC timeout from options
+	ctx, cancel := context.WithTimeout(context.Background(), m.opts.RPCTimeout)
+
+	// Direct return tea.Sequence - cleaner pattern
+	return tea.Sequence(
+		func() tea.Msg { return fetchStartedMsg{cancel: cancel} },
+		func() tea.Msg {
+			defer cancel()
+			data, err := m.fetchData(ctx)
+			if err != nil {
+				return dataErrMsg{err: err}
+			}
+			return dataMsg(data)
+		},
+	)
+}
+
+// fetchData does the actual blocking I/O (called from fetchCmd).
+// Best-effort aggregation: each sub-fetch (peers, node info, validators,
+// my-validator, rewards) is independent and failures are silently
+// skipped, leaving zero values — the function itself never errors here.
+func (m *Dashboard) fetchData(ctx context.Context) (DashboardData, error) {
+	data := DashboardData{LastUpdate: time.Now()}
+
+	// Use persistent collector for continuous CPU monitoring
+	data.Metrics = m.collector.Collect(ctx, m.opts.Config.RPCLocal, m.opts.Config.GenesisDomain)
+
+	// Fetch peer details
+	local := node.New(m.opts.Config.RPCLocal)
+	if peers, err := local.Peers(ctx); err == nil {
+		data.PeerList = make([]struct {
+			ID   string
+			Addr string
+		}, len(peers))
+		for i, p := range peers {
+			data.PeerList[i].ID = p.ID
+			data.PeerList[i].Addr = p.Addr
+		}
+	}
+
+	// Fetch node info
+	sup := process.New(m.opts.Config.HomeDir)
+	data.NodeInfo.Running = sup.IsRunning()
+	if pid, ok := sup.PID(); ok {
+		data.NodeInfo.PID = pid
+	}
+
+	// Get uptime if node is running
+	if data.NodeInfo.Running {
+		if uptime, ok := sup.Uptime(); ok {
+			data.NodeInfo.Uptime = uptime
+		}
+	}
+
+	// Get cached binary version (only refresh every 5 min)
+	data.NodeInfo.BinaryVer = m.getCachedVersion(ctx, data.NodeInfo.Running, data.NodeInfo.PID)
+
+	// Fetch validator data (cached 30s)
+	if valList, err := validator.GetCachedValidatorsList(ctx, m.opts.Config); err == nil {
+		// Convert validator.ValidatorInfo to dashboard format
+		data.NetworkValidators.Total = valList.Total
+		data.NetworkValidators.Validators = make([]struct {
+			Moniker           string
+			Status            string
+			VotingPower       int64
+			Commission        string
+			CommissionRewards string // Accumulated commission rewards
+			OutstandingRewards string // Total outstanding rewards
+			Address           string // Cosmos address (pushvaloper...)
+			EVMAddress        string // EVM address (0x...)
+			Jailed            bool   // Whether validator is jailed
+		}, len(valList.Validators))
+
+		for i, v := range valList.Validators {
+			data.NetworkValidators.Validators[i].Moniker = v.Moniker
+			data.NetworkValidators.Validators[i].Status = v.Status
+			data.NetworkValidators.Validators[i].VotingPower = v.VotingPower
+			data.NetworkValidators.Validators[i].Commission = v.Commission
+			data.NetworkValidators.Validators[i].Address = v.OperatorAddress
+			data.NetworkValidators.Validators[i].Jailed = v.Jailed
+			// EVM address will be fetched on-demand when user toggles to show it
+			data.NetworkValidators.Validators[i].EVMAddress = ""
+			// Rewards are fetched on-demand by validators_list component (cached 30s)
+			data.NetworkValidators.Validators[i].CommissionRewards = ""
+			data.NetworkValidators.Validators[i].OutstandingRewards = ""
+		}
+	}
+
+	// Fetch my validator status (cached 30s)
+	if myVal, err := validator.GetCachedMyValidator(ctx, m.opts.Config); err == nil {
+		data.MyValidator.IsValidator = myVal.IsValidator
+		data.MyValidator.Address = myVal.Address
+		data.MyValidator.Moniker = myVal.Moniker
+		data.MyValidator.Status = myVal.Status
+		data.MyValidator.VotingPower = myVal.VotingPower
+		data.MyValidator.VotingPct = myVal.VotingPct
+		data.MyValidator.Commission = myVal.Commission
+		data.MyValidator.Jailed = myVal.Jailed
+		data.MyValidator.SlashingInfo.JailReason = myVal.SlashingInfo.JailReason
+		data.MyValidator.SlashingInfo.JailedUntil = myVal.SlashingInfo.JailedUntil
+		data.MyValidator.SlashingInfo.Tombstoned = myVal.SlashingInfo.Tombstoned
+		data.MyValidator.SlashingInfo.MissedBlocks = myVal.SlashingInfo.MissedBlocks
+		data.MyValidator.ValidatorExistsWithSameMoniker = myVal.ValidatorExistsWithSameMoniker
+		data.MyValidator.ConflictingMoniker = myVal.ConflictingMoniker
+
+		// Fetch rewards for my validator if registered (cached 30s)
+		if myVal.IsValidator && myVal.Address != "" {
+			if commRwd, outRwd, err := validator.GetCachedRewards(ctx, m.opts.Config, myVal.Address); err == nil {
+				data.MyValidator.CommissionRewards = commRwd
+				data.MyValidator.OutstandingRewards = outRwd
+			} else {
+				// Set placeholders on error
+				data.MyValidator.CommissionRewards = "โ"
+				data.MyValidator.OutstandingRewards = "โ"
+			}
+		}
+	}
+
+	return data, nil
+}
+
// getCachedVersion fetches the pchaind binary version with caching:
// 5-minute TTL plus PID-based invalidation (a node restart may mean a
// newly installed binary).
//
// Shelling out to `pchaind version` is slow (200-500ms typical), so the
// cache keeps the dashboard refresh loop responsive. Error results
// ("pchaind not found" / "version error") deliberately do not refresh
// cachedVersionAt, so the lookup is retried on the next call.
// Returns "โ" while the node is stopped to avoid spawning the binary.
func (m *Dashboard) getCachedVersion(ctx context.Context, running bool, currentPID int) string {
	// Don't call pchaind version when node is stopped
	if !running {
		return "โ"
	}

	// Invalidate cache if PID changed (process restarted)
	if currentPID != m.cachedVersionPID {
		m.cachedVersion = ""
		m.cachedVersionPID = currentPID
		m.cachedVersionAt = time.Time{} // Force immediate fetch
	}

	// Serve the cached value while it is still fresh.
	if time.Since(m.cachedVersionAt) < 5*time.Minute && m.cachedVersion != "" {
		return m.cachedVersion
	}

	// First check if pchaind exists in PATH
	pchainPath, err := exec.LookPath("pchaind")
	if err != nil {
		if m.opts.Debug {
			fmt.Fprintf(os.Stderr, "Debug: pchaind not found in PATH: %v\n", err)
		}
		m.cachedVersion = "pchaind not found"
		return m.cachedVersion
	}

	// Fetch version (can be slow - 200-500ms typical)
	cmd := exec.CommandContext(ctx, pchainPath, "version")
	out, err := cmd.Output()
	if err == nil {
		m.cachedVersion = strings.TrimSpace(string(out))
		m.cachedVersionAt = time.Now()
	} else {
		if m.opts.Debug {
			fmt.Fprintf(os.Stderr, "Debug: Failed to get pchaind version: %v\n", err)
		}
		m.cachedVersion = "version error"
	}

	return m.cachedVersion
}
+
// FetchDataOnce performs a single blocking data fetch for non-TTY mode
// (e.g. output piped to a file), bypassing the interactive refresh loop.
func (m *Dashboard) FetchDataOnce(ctx context.Context) (DashboardData, error) {
	return m.fetchData(ctx)
}
+
+// RenderStatic renders a static text snapshot of dashboard data
+func (m *Dashboard) RenderStatic(data DashboardData) string {
+ var b strings.Builder
+
+ b.WriteString("=== PUSH VALIDATOR STATUS ===\n\n")
+
+ // Node Status
+ b.WriteString("NODE STATUS:\n")
+ if data.NodeInfo.Running {
+ b.WriteString(fmt.Sprintf(" Status: Running (PID: %d)\n", data.NodeInfo.PID))
+ b.WriteString(fmt.Sprintf(" Version: %s\n", data.NodeInfo.BinaryVer))
+ } else {
+ b.WriteString(" Status: Stopped\n")
+ }
+ b.WriteString(fmt.Sprintf(" RPC: %s\n", m.opts.Config.RPCLocal))
+ b.WriteString("\n")
+
+ // Chain Status
+ b.WriteString("CHAIN STATUS:\n")
+ b.WriteString(fmt.Sprintf(" Height: %s\n", HumanInt(data.Metrics.Chain.LocalHeight)))
+ if data.Metrics.Chain.RemoteHeight > 0 {
+ b.WriteString(fmt.Sprintf(" Remote Height: %s\n", HumanInt(data.Metrics.Chain.RemoteHeight)))
+ }
+ if data.Metrics.Chain.RemoteHeight > data.Metrics.Chain.LocalHeight {
+ blocksBehind := data.Metrics.Chain.RemoteHeight - data.Metrics.Chain.LocalHeight
+ b.WriteString(fmt.Sprintf(" Blocks Behind: %s\n", HumanInt(blocksBehind)))
+ }
+ b.WriteString(fmt.Sprintf(" Catching Up: %v\n", data.Metrics.Chain.CatchingUp))
+ b.WriteString("\n")
+
+ // Network Status
+ b.WriteString("NETWORK STATUS:\n")
+ b.WriteString(fmt.Sprintf(" Peers: %d\n", data.Metrics.Network.Peers))
+ b.WriteString(fmt.Sprintf(" Chain ID: %s\n", data.Metrics.Node.ChainID))
+ b.WriteString("\n")
+
+ // Validator Status
+ if data.MyValidator.IsValidator {
+ b.WriteString("VALIDATOR STATUS:\n")
+ b.WriteString(fmt.Sprintf(" Moniker: %s\n", data.MyValidator.Moniker))
+ b.WriteString(fmt.Sprintf(" Status: %s\n", data.MyValidator.Status))
+ b.WriteString(fmt.Sprintf(" Voting Power: %s", HumanInt(data.MyValidator.VotingPower)))
+ if data.MyValidator.VotingPct > 0 {
+ b.WriteString(fmt.Sprintf(" (%s)\n", Percent(data.MyValidator.VotingPct)))
+ } else {
+ b.WriteString("\n")
+ }
+ b.WriteString(fmt.Sprintf(" Jailed: %v\n", data.MyValidator.Jailed))
+ b.WriteString("\n")
+ }
+
+ b.WriteString(fmt.Sprintf("Last Update: %s\n", data.LastUpdate.Format("2006-01-02 15:04:05 MST")))
+
+ return b.String()
+}
diff --git a/push-validator-manager/internal/dashboard/header.go b/push-validator-manager/internal/dashboard/header.go
new file mode 100644
index 00000000..d9812380
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/header.go
@@ -0,0 +1,97 @@
+package dashboard
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+)
+
// timeNow is a variable for time.Now to enable deterministic testing
// (tests can swap in a fixed clock).
var timeNow = time.Now

// Header component renders the dashboard title banner and, when the last
// data fetch failed, an error line taken from DashboardData.Err.
type Header struct {
	BaseComponent
	data DashboardData // Last dashboard snapshot (carries error info); read in View
}
+
+// NewHeader creates a new header component
+func NewHeader() *Header {
+ return &Header{
+ BaseComponent: BaseComponent{},
+ }
+}
+
// ID returns the component identifier used by the layout registry.
func (c *Header) ID() string {
	return "header"
}

// Title returns the banner text rendered at the top of the dashboard.
func (c *Header) Title() string {
	return "PUSH VALIDATOR DASHBOARD"
}

// MinWidth returns the minimum width in columns.
func (c *Header) MinWidth() int {
	return 40
}

// MinHeight returns the minimum height in rows.
func (c *Header) MinHeight() int {
	return 3
}

// Update stores the latest dashboard snapshot (including any fetch error)
// for use in View. It never emits commands.
func (c *Header) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
	// Store entire data for access in View
	c.data = data
	return c, nil
}
+
+// View renders the header matching canonical signature View(width, height int)
+func (c *Header) View(w, h int) string {
+ // Guard against invalid dimensions
+ if w <= 0 || h <= 0 {
+ return ""
+ }
+
+ // Build plain text content
+ // Calculate interior width for centering
+ inner := w - 4 // Account for border (2) + padding (2)
+ if inner < 0 {
+ inner = 0
+ }
+
+ // Apply bold + cyan highlighting to title
+ titleStyled := FormatTitle(c.Title(), inner)
+
+ var lines []string
+ lines = append(lines, titleStyled)
+
+ if c.data.Err != nil {
+ errLine := fmt.Sprintf("โ %s", c.data.Err.Error())
+ lines = append(lines, errLine)
+ }
+
+ content := strings.Join(lines, "\n")
+
+ // Match the exact styling pattern of data components for full compatibility
+ style := lipgloss.NewStyle().
+ Border(lipgloss.RoundedBorder()).
+ BorderForeground(lipgloss.Color("63")).
+ Padding(0, 1).
+ Align(lipgloss.Center)
+
+ // Account for border width (2 chars: left + right) to prevent overflow
+ borderWidth := 2
+ contentWidth := w - borderWidth
+ if contentWidth < 0 {
+ contentWidth = 0
+ }
+
+ return style.Width(contentWidth).Render(content)
+}
diff --git a/push-validator-manager/internal/dashboard/layout.go b/push-validator-manager/internal/dashboard/layout.go
new file mode 100644
index 00000000..e838a668
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/layout.go
@@ -0,0 +1,305 @@
+package dashboard
+
+import (
+ "sort"
+)
+
// LayoutConfig defines the dashboard layout structure as an ordered list
// of rows (row 0 is treated as the header by Compute's slack distribution).
type LayoutConfig struct {
	Rows []LayoutRow
}

// LayoutRow defines a single row in the layout.
type LayoutRow struct {
	Components []string // Component IDs, left to right
	Weights    []int    // Width distribution weights (parallel to Components)
	MinHeight  int      // Minimum height for this row
}

// Cell represents a positioned component in the final layout.
type Cell struct {
	ID   string // Component ID
	X, Y int    // Top-left position
	W, H int    // Dimensions
}

// LayoutResult is returned by Compute - concrete positioned components
// plus an optional warning when components were hidden or truncated.
type LayoutResult struct {
	Cells   []Cell
	Warning string // e.g., "Some panels hidden (terminal too narrow)"
}
+
// Layout manages component positioning. It resolves component IDs to
// their MinWidth constraints through the registry.
type Layout struct {
	config   LayoutConfig
	registry *ComponentRegistry
}

// NewLayout creates a new layout manager for the given config and registry.
func NewLayout(config LayoutConfig, registry *ComponentRegistry) *Layout {
	return &Layout{
		config:   config,
		registry: registry,
	}
}
+
// Compute builds the final layout with concrete Cell positions for a
// terminal of the given width/height.
//
// Algorithm:
//  1. Start every row at its MinHeight.
//  2. Distribute leftover vertical space evenly across the data rows
//     (rows 1+); the header row (index 0) keeps its minimum height.
//  3. Resolve per-row column widths via computeRowWidths and emit Cells
//     top-to-bottom, left-to-right.
//
// If any row could not fit all components, Warning carries the message
// from the last such row.
func (l *Layout) Compute(width, height int) LayoutResult {
	result := LayoutResult{Cells: make([]Cell, 0)}

	// Step 1: Calculate base row heights (MinHeight for each)
	rowHeights := make([]int, len(l.config.Rows))
	totalMinHeight := 0
	for i, row := range l.config.Rows {
		rowHeights[i] = row.MinHeight
		totalMinHeight += row.MinHeight
	}

	// Step 2: Distribute vertical slack if available
	// Only distribute to data rows (skip header at index 0)
	verticalSlack := height - totalMinHeight
	if verticalSlack > 0 && len(l.config.Rows) > 1 {
		// Distribute slack only to data rows (rows 1+), not to header (row 0)
		dataRows := len(l.config.Rows) - 1
		extraPerRow := verticalSlack / dataRows
		remainder := verticalSlack % dataRows

		for i := 1; i < len(rowHeights); i++ { // Start at 1, skip header
			rowHeights[i] += extraPerRow
			if (i - 1) < remainder {
				rowHeights[i]++ // Fair remainder distribution
			}
		}
	}

	// Step 3: Build cells with distributed heights
	y := 0
	for i, row := range l.config.Rows {
		// Use full width for all rows; components apply their own borders/padding
		usableWidth := width
		widths, keptIDs, warning := l.computeRowWidths(row, usableWidth)
		if warning != "" {
			result.Warning = warning
		}

		// Build cells with kept component IDs (handles dropped components)
		x := 0
		for j := range keptIDs {
			result.Cells = append(result.Cells, Cell{
				ID: keptIDs[j],
				X:  x,
				Y:  y,
				W:  widths[j],
				H:  rowHeights[i], // Use distributed height
			})
			x += widths[j]
		}
		y += rowHeights[i]
	}

	return result
}
+
// computeRowWidths allocates totalWidth across a row's components.
//
// It first grants every component its MinWidth (components missing from
// the registry get a 20-column default), then distributes the remaining
// width proportionally to Weights, awarding leftover columns to the
// largest fractional shares. All but the last component then get one
// extra column so adjacent borders can visually overlap; a final pass
// trims from the right (respecting MinWidth) if that compensation
// overflowed totalWidth. If the MinWidths alone exceed totalWidth, it
// falls back to handleInsufficientWidth, which may drop components.
//
// Returns: widths, kept component IDs, warning message.
func (l *Layout) computeRowWidths(row LayoutRow, totalWidth int) ([]int, []string, string) {
	widths := make([]int, len(row.Components))
	keptIDs := append([]string(nil), row.Components...) // Default: keep all

	// Step 1: Satisfy all MinWidth requirements
	remainingWidth := totalWidth
	for i, compID := range row.Components {
		comp := l.registry.Get(compID)
		if comp == nil {
			// Component not found - use default min width
			widths[i] = 20
			remainingWidth -= 20
			continue
		}
		minW := comp.MinWidth()
		widths[i] = minW
		remainingWidth -= minW
	}

	// Step 2: Check if MinWidth requirements can be satisfied
	if remainingWidth < 0 {
		// Try to handle insufficient width
		return l.handleInsufficientWidth(row, totalWidth)
	}

	// Step 3: Distribute remaining width by weights + remainder
	totalWeight := 0
	for _, w := range row.Weights {
		totalWeight += w
	}

	if totalWeight == 0 {
		// No weights specified - everyone stays at MinWidth.
		return widths, keptIDs, ""
	}

	// Track fractional parts for fair remainder distribution
	type frac struct {
		idx  int
		frac float64
	}
	fracs := make([]frac, len(row.Components))

	distributed := 0
	for i, weight := range row.Weights {
		if i >= len(row.Components) {
			break
		}
		exact := float64(remainingWidth*weight) / float64(totalWeight)
		extra := int(exact)
		widths[i] += extra
		distributed += extra
		fracs[i] = frac{idx: i, frac: exact - float64(extra)}
	}

	// Distribute remainder (remainingWidth - distributed) to largest fractional parts
	remainder := remainingWidth - distributed
	sort.Slice(fracs, func(i, j int) bool {
		return fracs[i].frac > fracs[j].frac
	})
	for i := 0; i < remainder && i < len(fracs); i++ {
		widths[fracs[i].idx]++
	}

	// Add border compensation: When widgets are side-by-side, each subtracts 2 for borders.
	// To make borders touch, add +1 to each widget width (except the last one).
	// This allows adjacent borders to overlap visually while maintaining proper sizing.
	if len(widths) > 1 {
		for i := 0; i < len(widths)-1; i++ {
			widths[i]++ // Add 1 to compensate for border overlap
		}
	}

	// Validate total width and trim if needed (safety check for edge cases)
	totalAllocated := 0
	for _, w := range widths {
		totalAllocated += w
	}
	if totalAllocated > totalWidth {
		excess := totalAllocated - totalWidth
		// Trim from rightmost component first, respecting MinWidth
		for i := len(widths) - 1; i >= 0 && excess > 0; i-- {
			comp := l.registry.Get(row.Components[i])
			if comp == nil {
				continue
			}
			canTrim := widths[i] - comp.MinWidth()
			if canTrim <= 0 {
				continue
			}
			trim := excess
			if trim > canTrim {
				trim = canTrim
			}
			widths[i] -= trim
			excess -= trim
		}
	}

	return widths, keptIDs, ""
}
+
// handleInsufficientWidth handles rows whose combined MinWidths exceed
// the available width by dropping non-essential components and retrying.
//
// Termination: if dropping removed nothing (the row is already at its
// essential set) or the terminal is extremely narrow (<10 cols), widths
// are clamped to an even split of whatever fits instead of recursing,
// which guarantees the computeRowWidths <-> handleInsufficientWidth
// mutual recursion bottoms out.
//
// Returns: widths, kept component IDs, warning message.
func (l *Layout) handleInsufficientWidth(row LayoutRow, width int) ([]int, []string, string) {
	// Keep only essential components
	essential := []string{"header", "node_status", "chain_status"}
	kept := keptEssentials(row.Components, essential)

	if len(kept) == 0 {
		kept = row.Components[:1] // Keep at least one
	}

	// CRITICAL: If we've already reduced to essential/minimal set but still don't fit,
	// we must terminate recursion. Clamp widths to available space.
	if len(kept) >= len(row.Components) || width < 10 {
		// Same component set or terminal too narrow - force clamp to prevent infinite loop
		// Drop components if terminal width cannot allocate at least one column each.
		if width <= 0 {
			return []int{1}, kept[:1], "Terminal too narrow - display truncated"
		}
		if len(kept) > width {
			kept = kept[:width]
		}
		widths := clampEven(kept, width)
		warning := "Terminal too narrow - display truncated"
		return widths, kept, warning
	}

	// Components were dropped - try to fit remaining set
	warning := "Some panels hidden (terminal too narrow)"
	newRow := LayoutRow{
		Components: kept,
		Weights:    equalWeights(len(kept)),
		MinHeight:  row.MinHeight,
	}
	widths, _, _ := l.computeRowWidths(newRow, width) // Recurse with reduced set
	return widths, kept, warning
}
+
// contains reports whether item occurs in slice.
func contains(slice []string, item string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
+
+// keptEssentials filters component IDs to keep only essential ones
+func keptEssentials(ids, essential []string) []string {
+ kept := []string{}
+ for _, id := range ids {
+ if contains(essential, id) {
+ kept = append(kept, id)
+ }
+ }
+ return kept
+}
+
// clampEven splits width as evenly as possible across the kept components,
// guaranteeing each at least one column. Leftover columns after integer
// division go to the leftmost components; if the one-column floor pushed
// the total past the available width, columns are reclaimed from the
// right without dropping anyone below one.
func clampEven(kept []string, width int) []int {
	n := len(kept)
	per := width / n
	if per < 1 {
		per = 1 // Minimum 1 column per component
	}
	spare := width - per*n

	widths := make([]int, n)
	total := 0
	for i := range widths {
		cols := per
		if i < spare {
			cols++ // Fair remainder distribution to the left
		}
		widths[i] = cols
		total += cols
	}

	// Reclaim overflow from the right, never going below one column.
	for i := n - 1; i >= 0 && total > width; i-- {
		if widths[i] > 1 {
			widths[i]--
			total--
		}
	}
	return widths
}
+
// equalWeights creates n unit weights; computeRowWidths' remainder logic
// takes care of fairness when the width does not divide evenly.
func equalWeights(n int) []int {
	weights := make([]int, n)
	for i := 0; i < n; i++ {
		weights[i] = 1
	}
	return weights
}
diff --git a/push-validator-manager/internal/dashboard/log_viewer.go b/push-validator-manager/internal/dashboard/log_viewer.go
new file mode 100644
index 00000000..b9810901
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/log_viewer.go
@@ -0,0 +1,517 @@
+package dashboard
+
import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
)
+
// LogViewer component displays and tails a log file with scrolling and
// search. A background goroutine (started by NewLogViewer, stopped by
// Close) feeds lines into a fixed-size ring buffer that View reads.
type LogViewer struct {
	BaseComponent
	logPath    string       // Path of the tailed log file
	buffer     *ringBuffer  // Last N log lines (oldest evicted first)
	scrollPos  int          // Current scroll position (0 = bottom/follow mode)
	followMode bool         // Auto-scroll to latest logs
	searchMode bool         // Search input active
	searchTerm string       // Current search filter (case-insensitive substring)
	noEmoji    bool         // Disable icons/colors in rendered output
	mu         sync.RWMutex // Guards the scroll/search state fields above

	// Background log tailer cancellation (see Close)
	cancel context.CancelFunc
}
+
// ringBuffer is a fixed-capacity circular buffer of log lines. Once full,
// each Add overwrites the oldest entry. Safe for concurrent use.
type ringBuffer struct {
	lines []string
	size  int
	head  int // next write slot
	count int // number of valid entries (<= size)
	mu    sync.RWMutex
}

// newRingBuffer returns an empty buffer holding at most size lines.
func newRingBuffer(size int) *ringBuffer {
	return &ringBuffer{
		lines: make([]string, size),
		size:  size,
	}
}

// Add appends line, evicting the oldest entry once the buffer is full.
func (rb *ringBuffer) Add(line string) {
	rb.mu.Lock()
	defer rb.mu.Unlock()

	rb.lines[rb.head] = line
	rb.head = (rb.head + 1) % rb.size
	if rb.count < rb.size {
		rb.count++
	}
}

// GetAll returns a copy of the buffered lines ordered oldest to newest.
func (rb *ringBuffer) GetAll() []string {
	rb.mu.RLock()
	defer rb.mu.RUnlock()

	out := make([]string, rb.count)
	if rb.count == 0 {
		return out
	}

	// The oldest entry sits count slots behind the write head, modulo size.
	first := (rb.head - rb.count + rb.size) % rb.size
	for i := range out {
		out[i] = rb.lines[(first+i)%rb.size]
	}
	return out
}

// Count returns the number of lines currently buffered.
func (rb *ringBuffer) Count() int {
	rb.mu.RLock()
	defer rb.mu.RUnlock()
	return rb.count
}
+
+// NewLogViewer creates a new log viewer component
+func NewLogViewer(noEmoji bool, homeDir string) *LogViewer {
+ logPath := homeDir + "/logs/pchaind.log"
+
+ lv := &LogViewer{
+ BaseComponent: BaseComponent{},
+ logPath: logPath,
+ buffer: newRingBuffer(500),
+ followMode: true,
+ scrollPos: 0,
+ noEmoji: noEmoji,
+ mu: sync.RWMutex{}, // Explicit mutex initialization
+ }
+
+ // Start background log tailer AFTER all fields are initialized
+ ctx, cancel := context.WithCancel(context.Background())
+ lv.cancel = cancel
+
+ // Add delay to ensure component is fully ready
+ go func() {
+ time.Sleep(100 * time.Millisecond)
+ lv.tailLogs(ctx)
+ }()
+
+ return lv
+}
+
// ID returns the component identifier used by the layout registry.
func (lv *LogViewer) ID() string {
	return "log_viewer"
}

// Title returns the panel title, annotated with the active search term
// while searching, or with the buffered line count while paused.
// NOTE(review): reads searchMode/searchTerm/followMode without taking mu,
// while handleKey mutates them under mu — confirm the caller serializes
// Title with Update, otherwise this is a data race.
func (lv *LogViewer) Title() string {
	icon := "๐ Logs"
	if lv.noEmoji {
		icon = "Logs"
	}

	if lv.searchMode {
		return fmt.Sprintf("%s [Search: %s]", icon, lv.searchTerm)
	}

	if !lv.followMode {
		return fmt.Sprintf("%s [Paused - %d lines]", icon, lv.buffer.Count())
	}

	return icon
}

// MinWidth returns the minimum width in columns.
func (lv *LogViewer) MinWidth() int {
	return 40
}

// MinHeight returns the minimum height in rows.
func (lv *LogViewer) MinHeight() int {
	// Fixed 8 lines + title (1) + footer (1) + border padding (2) + spacing (1) = 13
	return 13
}
+
+// Update receives messages
+func (lv *LogViewer) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ return lv.handleKey(msg)
+ }
+
+ return lv, nil
+}
+
// handleKey processes keyboard input under the viewer's write lock.
//
// In search mode: printable single-rune keys extend the filter, backspace
// edits it, enter applies it, esc cancels and clears it. Otherwise:
// '/' enters search mode, 'f' toggles follow, up/down scroll (reaching
// the bottom re-enables follow), 't' jumps to the oldest buffered line,
// 'l' jumps back to live tailing.
func (lv *LogViewer) handleKey(msg tea.KeyMsg) (Component, tea.Cmd) {
	lv.mu.Lock()
	defer lv.mu.Unlock()

	if lv.searchMode {
		switch msg.String() {
		case "esc":
			// Cancel search: leave the mode and drop the filter.
			lv.searchMode = false
			lv.searchTerm = ""
		case "enter":
			// Apply: keep the filter, stop capturing keystrokes.
			lv.searchMode = false
		case "backspace":
			if len(lv.searchTerm) > 0 {
				lv.searchTerm = lv.searchTerm[:len(lv.searchTerm)-1]
			}
		default:
			// Add single-character (printable) keys to the search term.
			if len(msg.String()) == 1 {
				lv.searchTerm += msg.String()
			}
		}
		return lv, nil
	}

	switch msg.String() {
	case "/":
		lv.searchMode = true
		lv.searchTerm = ""

	case "f":
		lv.followMode = !lv.followMode
		if lv.followMode {
			lv.scrollPos = 0 // Snap back to the newest line
		}

	case "up":
		// Scrolling up leaves follow mode.
		if lv.followMode {
			lv.followMode = false
		}
		lv.scrollPos++
		// Bound scrollPos to buffer size to prevent overflow
		if lv.scrollPos > lv.buffer.Count() {
			lv.scrollPos = lv.buffer.Count()
		}

	case "down":
		lv.scrollPos--
		if lv.scrollPos <= 0 {
			// Reached the bottom: resume following new lines.
			lv.scrollPos = 0
			lv.followMode = true
		}

	case "t": // 't' for 'top' - jump to oldest logs
		lv.followMode = false
		lv.scrollPos = lv.buffer.Count()

	case "l": // 'l' for 'latest' - jump to newest logs
		lv.followMode = true
		lv.scrollPos = 0
	}

	return lv, nil
}
+
// View renders the bordered log panel with render caching. A deferred
// recover guards against render panics so a bad frame cannot crash the
// whole dashboard (on panic the function returns the zero string).
func (lv *LogViewer) View(w, h int) string {
	// Add panic recovery to prevent dashboard crashes
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintf(os.Stderr, "PANIC in LogViewer.View: %v\n", r)
		}
	}()

	lv.mu.RLock()
	defer lv.mu.RUnlock()

	// Panel chrome: rounded border + 1-column horizontal padding.
	style := lipgloss.NewStyle().
		Border(lipgloss.RoundedBorder()).
		BorderForeground(lipgloss.Color("63")).
		Padding(0, 1)

	content := lv.renderContent(w, h)

	// Serve the cached render when content and dimensions are unchanged.
	if lv.CheckCacheWithSize(content, w, h) {
		return lv.GetCached()
	}

	if w < 0 {
		w = 0
	}
	if h < 0 {
		h = 0
	}

	// Account for border width (2 chars: left + right) to prevent overflow.
	borderWidth := 2
	contentWidth := w - borderWidth
	if contentWidth < 0 {
		contentWidth = 0
	}

	// Don't use MaxHeight - let border render fully.
	// The layout system already allocates the right amount of space
	rendered := style.Width(contentWidth).Render(content)
	lv.UpdateCache(rendered)
	return rendered
}
+
// renderContent builds the panel body: styled title, a fixed 8-line
// window into the (optionally search-filtered) buffer, and a footer of
// key hints.
//
// scrollPos counts lines back from the newest entry, so the visible
// slice is filtered[end-8 : end] with end = total - scrollPos, clamped
// to valid bounds. Keeping the window a constant 8 lines stops the
// panel from resizing as logs stream in.
func (lv *LogViewer) renderContent(w, h int) string {
	// Add panic recovery with detailed error info
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprintf(os.Stderr, "PANIC in LogViewer.renderContent: %v (w=%d, h=%d)\n", r, w, h)
		}
	}()

	// Interior width: panel width minus border (2) and padding (2).
	inner := w - 4
	if inner < 0 {
		inner = 0
	}

	// Title
	title := FormatTitle(lv.Title(), inner)

	// Get all lines
	allLines := lv.buffer.GetAll()

	// Filter by search term (case-insensitive substring match).
	var filteredLines []string
	if lv.searchTerm != "" {
		searchLower := strings.ToLower(lv.searchTerm)
		for _, line := range allLines {
			if strings.Contains(strings.ToLower(line), searchLower) {
				filteredLines = append(filteredLines, line)
			}
		}
	} else {
		filteredLines = allLines
	}

	// Fixed 8-line display for stable log viewing
	// This prevents the display from constantly adjusting as logs stream in
	const fixedLogLines = 8
	availableLines := fixedLogLines

	// Apply scroll position
	totalLines := len(filteredLines)
	var visibleLines []string

	if totalLines == 0 {
		visibleLines = []string{"(no logs yet)"}
	} else {
		// Calculate slice range based on scroll position
		endIdx := totalLines - lv.scrollPos
		startIdx := endIdx - availableLines

		// Bounds checking to prevent slice panic
		if endIdx < 0 {
			endIdx = 0
		}
		if endIdx > totalLines {
			endIdx = totalLines
		}
		if startIdx < 0 {
			startIdx = 0
		}
		// CRITICAL: Ensure startIdx <= endIdx to prevent panic
		if startIdx > endIdx {
			startIdx = endIdx
		}

		visibleLines = filteredLines[startIdx:endIdx]
	}

	// Render lines with color coding
	var styledLines []string
	for _, line := range visibleLines {
		styledLine := lv.styleLogLine(line, inner)
		styledLines = append(styledLines, styledLine)
	}

	content := strings.Join(styledLines, "\n")

	// Add footer hint
	footer := lv.renderFooter()

	return fmt.Sprintf("%s\n%s\n%s", title, content, footer)
}
+
+// styleLogLine applies color coding based on log level
+func (lv *LogViewer) styleLogLine(line string, maxWidth int) string {
+ // Don't truncate - let terminal handle line wrapping
+ // This allows users to see full log messages
+
+ if lv.noEmoji {
+ return line
+ }
+
+ // Detect log level and apply color
+ var style lipgloss.Style
+
+ // Pattern matching for common log levels
+ lowerLine := strings.ToLower(line)
+
+ if strings.Contains(lowerLine, "error") || strings.Contains(lowerLine, "fatal") || strings.Contains(lowerLine, "panic") {
+ style = lipgloss.NewStyle().Foreground(lipgloss.Color("196")) // Red
+ } else if strings.Contains(lowerLine, "warn") || strings.Contains(lowerLine, "warning") {
+ style = lipgloss.NewStyle().Foreground(lipgloss.Color("226")) // Yellow
+ } else if strings.Contains(lowerLine, "info") {
+ style = lipgloss.NewStyle().Foreground(lipgloss.Color("82")) // Green
+ } else if strings.Contains(lowerLine, "debug") || strings.Contains(lowerLine, "trace") {
+ style = lipgloss.NewStyle().Foreground(lipgloss.Color("240")) // Gray
+ } else {
+ // Default color
+ return line
+ }
+
+ return style.Render(line)
+}
+
+// renderFooter shows control hints
+func (lv *LogViewer) renderFooter() string {
+ if lv.searchMode {
+ return lipgloss.NewStyle().Foreground(lipgloss.Color("241")).
+ Render("Enter to apply | Esc to cancel")
+ }
+
+ var hints string
+ if lv.followMode {
+ hints = "โ/โ: scroll | f: pause | /: search | t: oldest"
+ } else {
+ hints = "โ/โ: scroll | f: live | /: search | l: latest | t: oldest"
+ }
+
+ return lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render(hints)
+}
+
+// tailLogs runs in background to tail log file
+func (lv *LogViewer) tailLogs(ctx context.Context) {
+ // Wait for log file to exist
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ if _, err := os.Stat(lv.logPath); err == nil {
+ break
+ }
+ time.Sleep(1 * time.Second)
+ }
+
+ // Read initial backlog (last 100 lines)
+ if err := lv.loadBacklog(100); err != nil {
+ // Ignore error, file might not exist yet
+ }
+
+ // Start tailing
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+
+ if err := lv.followFile(ctx); err != nil {
+ time.Sleep(1 * time.Second)
+ continue
+ }
+ }
+}
+
+// loadBacklog reads last N lines from log file
+func (lv *LogViewer) loadBacklog(n int) error {
+ f, err := os.Open(lv.logPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ // Allow long log lines (up to 512 KiB)
+ bufSize := 512 * 1024
+ scanner.Buffer(make([]byte, bufSize), bufSize)
+
+ var lines []string
+ for scanner.Scan() {
+ line := scanner.Text()
+ if len(lines) == n {
+ lines = lines[1:]
+ }
+ lines = append(lines, line)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ // Add to buffer
+ for _, line := range lines {
+ lv.buffer.Add(line)
+ }
+
+ return nil
+}
+
+// followFile tails the log file
+func (lv *LogViewer) followFile(ctx context.Context) error {
+ f, err := os.Open(lv.logPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Seek to end
+ if _, err := f.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+
+ // Allow long log lines (up to 512 KiB)
+ bufSize := 512 * 1024
+ reader := bufio.NewReaderSize(f, bufSize)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+
+ line, err := reader.ReadString('\n')
+ if err == io.EOF {
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ if err != nil {
+ return err
+ }
+
+ // Add to buffer (strip newline)
+ lv.buffer.Add(strings.TrimSuffix(line, "\n"))
+ }
+}
+
// Close stops the background tailer goroutine started by NewLogViewer.
// Safe to call on a viewer whose tailer was never started (nil cancel).
func (lv *LogViewer) Close() {
	if lv.cancel != nil {
		lv.cancel()
	}
}

// Compile-time check that LogViewer implements Component
var _ Component = (*LogViewer)(nil)
diff --git a/push-validator-manager/internal/dashboard/network_status.go b/push-validator-manager/internal/dashboard/network_status.go
new file mode 100644
index 00000000..5ff854a5
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/network_status.go
@@ -0,0 +1,133 @@
+package dashboard
+
+import (
+ "fmt"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+)
+
// NetworkStatus component shows network connection status: peer list,
// latency, chain ID, node ID, and node moniker.
type NetworkStatus struct {
	BaseComponent
	data  DashboardData // Last snapshot delivered via Update
	icons Icons         // Icon set selected by the noEmoji flag
}

// NewNetworkStatus creates a new network status component. noEmoji
// selects the non-emoji icon set for terminals without emoji support.
func NewNetworkStatus(noEmoji bool) *NetworkStatus {
	return &NetworkStatus{
		BaseComponent: BaseComponent{},
		icons:         NewIcons(noEmoji),
	}
}
+
// ID returns the component identifier used by the layout registry.
func (c *NetworkStatus) ID() string {
	return "network_status"
}

// Title returns the component title.
func (c *NetworkStatus) Title() string {
	return "Network Status"
}

// MinWidth returns the minimum width in columns.
func (c *NetworkStatus) MinWidth() int {
	return 25
}

// MinHeight returns the minimum height in rows.
func (c *NetworkStatus) MinHeight() int {
	return 8
}

// Update stores the latest dashboard snapshot for rendering in View.
// It never emits commands.
func (c *NetworkStatus) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
	c.data = data
	return c, nil
}
+
+// View renders the component with caching
+func (c *NetworkStatus) View(w, h int) string {
+ // Render with styling
+ style := lipgloss.NewStyle().
+ Border(lipgloss.RoundedBorder()).
+ BorderForeground(lipgloss.Color("63")).
+ Padding(0, 1)
+
+ content := c.renderContent(w)
+
+ // Check cache
+ if c.CheckCacheWithSize(content, w, h) {
+ return c.GetCached()
+ }
+
+ if w < 0 {
+ w = 0
+ }
+ if h < 0 {
+ h = 0
+ }
+
+ // Account for border width (2 chars: left + right) to prevent overflow
+ borderWidth := 2
+ contentWidth := w - borderWidth
+ if contentWidth < 0 {
+ contentWidth = 0
+ }
+
+ rendered := style.Width(contentWidth).Render(content)
+ c.UpdateCache(rendered)
+ return rendered
+}
+
// renderContent builds the panel's plain-text body: a styled title
// followed by the peer list (capped at 5 entries), then latency, chain
// ID, node ID, and moniker lines — each omitted when its value is empty.
func (c *NetworkStatus) renderContent(w int) string {
	var lines []string

	// Interior width after accounting for rounded border (2 chars) and padding (2 chars).
	inner := w - 4
	if inner < 0 {
		inner = 0
	}

	// Peers list (first 5 IDs, then a "... and N more" summary).
	if len(c.data.PeerList) > 0 {
		lines = append(lines, fmt.Sprintf("Connected to %d peers (Node ID):", len(c.data.PeerList)))
		maxDisplay := 5
		for i, peer := range c.data.PeerList {
			if i >= maxDisplay {
				lines = append(lines, fmt.Sprintf(" ... and %d more", len(c.data.PeerList)-maxDisplay))
				break
			}
			// Show full ID
			lines = append(lines, fmt.Sprintf(" %s", peer.ID))
		}
	} else {
		lines = append(lines, fmt.Sprintf("%s 0 peers", c.icons.Warn))
	}

	// Latency (omitted when unknown/zero).
	if c.data.Metrics.Network.LatencyMS > 0 {
		lines = append(lines, fmt.Sprintf("Latency: %dms", c.data.Metrics.Network.LatencyMS))
	}

	// Chain ID, truncated to 24 chars with an ellipsis.
	if c.data.Metrics.Node.ChainID != "" {
		lines = append(lines, fmt.Sprintf("Chain: %s", truncateWithEllipsis(c.data.Metrics.Node.ChainID, 24)))
	}

	// Node ID
	if c.data.Metrics.Node.NodeID != "" {
		// Show full node ID
		lines = append(lines, fmt.Sprintf("Node ID: %s", c.data.Metrics.Node.NodeID))
	}

	// Moniker
	if c.data.Metrics.Node.Moniker != "" {
		lines = append(lines, fmt.Sprintf("Name: %s", c.data.Metrics.Node.Moniker))
	}

	return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
}
diff --git a/push-validator-manager/internal/dashboard/node_status.go b/push-validator-manager/internal/dashboard/node_status.go
new file mode 100644
index 00000000..3f6d82de
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/node_status.go
@@ -0,0 +1,137 @@
+package dashboard
+
+import (
+ "fmt"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+)
+
// NodeStatus component shows node process status: run state/PID, RPC
// listener state, uptime, memory/disk usage, and binary version.
type NodeStatus struct {
	BaseComponent
	data  DashboardData // Last snapshot delivered via Update
	icons Icons         // Icon set selected by the noEmoji flag
}

// NewNodeStatus creates a new node status component. noEmoji selects the
// non-emoji icon set for terminals without emoji support.
func NewNodeStatus(noEmoji bool) *NodeStatus {
	return &NodeStatus{
		BaseComponent: BaseComponent{},
		icons:         NewIcons(noEmoji),
	}
}
+
// ID returns the component identifier used by the layout registry.
func (c *NodeStatus) ID() string {
	return "node_status"
}

// Title returns the component title.
func (c *NodeStatus) Title() string {
	return "Node Status"
}

// MinWidth returns the minimum width in columns.
func (c *NodeStatus) MinWidth() int {
	return 25
}

// MinHeight returns the minimum height in rows.
func (c *NodeStatus) MinHeight() int {
	return 8
}

// Update stores the latest dashboard snapshot for rendering in View.
// It never emits commands.
func (c *NodeStatus) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
	c.data = data
	return c, nil
}
+
+// View renders the component with caching
+func (c *NodeStatus) View(w, h int) string {
+ // Render with styling
+ style := lipgloss.NewStyle().
+ Border(lipgloss.RoundedBorder()).
+ BorderForeground(lipgloss.Color("63")).
+ Padding(0, 1)
+
+ content := c.renderContent(w)
+
+ // Check cache
+ if c.CheckCacheWithSize(content, w, h) {
+ return c.GetCached()
+ }
+
+ if w < 0 {
+ w = 0
+ }
+ if h < 0 {
+ h = 0
+ }
+
+ // Account for border width (2 chars: left + right) to prevent overflow
+ borderWidth := 2
+ contentWidth := w - borderWidth
+ if contentWidth < 0 {
+ contentWidth = 0
+ }
+
+ rendered := style.Width(contentWidth).Render(content)
+ c.UpdateCache(rendered)
+ return rendered
+}
+
// renderContent builds the panel's plain-text body: a styled title, then
// one line per metric; lines whose data is unavailable are omitted.
func (c *NodeStatus) renderContent(w int) string {
	var lines []string

	// Interior width after accounting for rounded border (2 chars) and padding (2 chars).
	inner := w - 4
	if inner < 0 {
		inner = 0
	}

	// Process state: "Stopped", "Running", or "Running (pid N)".
	icon := c.icons.Err
	status := "Stopped"
	if c.data.NodeInfo.Running {
		icon = c.icons.OK
		status = "Running"
		if c.data.NodeInfo.PID != 0 {
			status = fmt.Sprintf("Running (pid %d)", c.data.NodeInfo.PID)
		}
	}
	lines = append(lines, fmt.Sprintf("%s %s", icon, status))

	// RPC Status
	rpcIcon := c.icons.Err
	rpcStatus := "Not listening"
	if c.data.Metrics.Node.RPCListening {
		rpcIcon = c.icons.OK
		rpcStatus = "Listening"
	}
	lines = append(lines, fmt.Sprintf("%s RPC: %s", rpcIcon, rpcStatus))

	// Uptime (shown only once known).
	if c.data.NodeInfo.Uptime > 0 {
		lines = append(lines, fmt.Sprintf("Uptime: %s", DurationShort(c.data.NodeInfo.Uptime)))
	}

	// System metrics: used/total fractions passed to Percent.
	if c.data.Metrics.System.MemTotal > 0 {
		memPct := float64(c.data.Metrics.System.MemUsed) / float64(c.data.Metrics.System.MemTotal)
		lines = append(lines, fmt.Sprintf("Memory: %s", Percent(memPct)))
	}
	if c.data.Metrics.System.DiskTotal > 0 {
		diskPct := float64(c.data.Metrics.System.DiskUsed) / float64(c.data.Metrics.System.DiskTotal)
		lines = append(lines, fmt.Sprintf("Disk: %s", Percent(diskPct)))
	}

	// Binary Version
	if c.data.NodeInfo.BinaryVer != "" {
		lines = append(lines, fmt.Sprintf("Version: %s", c.data.NodeInfo.BinaryVer))
	}

	return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
}
diff --git a/push-validator-manager/internal/dashboard/types.go b/push-validator-manager/internal/dashboard/types.go
new file mode 100644
index 00000000..6217fd3f
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/types.go
@@ -0,0 +1,105 @@
+package dashboard
+
+import (
+ "context"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/metrics"
+)
+
// Message types for the Bubble Tea event loop. Routing all state changes
// through typed messages keeps control flow deterministic and single-threaded.

// tickMsg is sent periodically to trigger a data refresh.
type tickMsg time.Time

// dataMsg contains successfully fetched dashboard data.
type dataMsg DashboardData

// dataErrMsg contains an error from a failed data fetch.
type dataErrMsg struct {
	err error
}

// fetchStartedMsg is sent when a fetch begins, carrying the cancel function.
// Delivering the cancel func as a message ensures it is assigned on the UI
// thread, not in the Cmd goroutine (avoids racing with the event loop).
type fetchStartedMsg struct {
	cancel context.CancelFunc
}

// forceRefreshMsg is sent when the user presses 'r' to refresh immediately.
type forceRefreshMsg struct{}

// toggleHelpMsg is sent when the user presses '?' to toggle the help overlay.
type toggleHelpMsg struct{}

// DashboardData aggregates all data shown in the dashboard. A single snapshot
// is fetched per tick and fanned out to every component via Update.
type DashboardData struct {
	// Reuse existing metrics collector
	Metrics metrics.Snapshot

	// Node process information
	NodeInfo struct {
		Running bool
		PID int
		Uptime time.Duration
		BinaryVer string // Cached version (5min TTL)
	}

	// My validator status
	MyValidator struct {
		IsValidator bool
		Address string
		Moniker string
		Status string
		VotingPower int64
		VotingPct float64 // Percentage of total voting power [0,1]
		Commission string
		CommissionRewards string // Accumulated commission rewards
		OutstandingRewards string // Total outstanding rewards
		Jailed bool
		SlashingInfo struct {
			JailReason string // "Downtime", "Double Sign", or "Unknown"
			JailedUntil string // RFC3339 formatted timestamp
			Tombstoned bool // Whether validator is permanently jailed (double sign)
			MissedBlocks int64 // Number of missed blocks
		}
		ValidatorExistsWithSameMoniker bool // True if a different validator uses this node's moniker
		ConflictingMoniker string // The moniker that conflicts
	}

	// Network validators list
	NetworkValidators struct {
		Validators []struct {
			Moniker string
			Status string
			VotingPower int64
			Commission string
			CommissionRewards string // Accumulated commission rewards
			OutstandingRewards string // Total outstanding rewards
			Address string // Cosmos address (pushvaloper...)
			EVMAddress string // EVM address (0x...)
			Jailed bool // Whether validator is jailed
		}
		Total int
	}

	// Connected peers list
	PeerList []struct {
		ID string
		Addr string
	}

	LastUpdate time.Time // Wall-clock time of the last successful fetch
	Err error // Last fetch error (for display in header)
}

// Options configures dashboard behavior.
type Options struct {
	Config config.Config
	RefreshInterval time.Duration
	RPCTimeout time.Duration // Timeout for RPC calls (default: 5s)
	NoColor bool
	NoEmoji bool
	Debug bool // Enable debug output
}
diff --git a/push-validator-manager/internal/dashboard/util.go b/push-validator-manager/internal/dashboard/util.go
new file mode 100644
index 00000000..e83747c5
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/util.go
@@ -0,0 +1,357 @@
+package dashboard
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/charmbracelet/lipgloss"
+)
+
// HumanInt formats an integer with thousands separators, preserving the sign
// (e.g. 1234567 -> "1,234,567", -1234 -> "-1,234").
//
// The sign is taken from strconv.FormatInt's output rather than by negating n,
// because `n = -n` overflows for math.MinInt64 (negation of the minimum int64
// is not representable), which the previous implementation did.
func HumanInt(n int64) string {
	s := strconv.FormatInt(n, 10)
	sign := ""
	if strings.HasPrefix(s, "-") {
		sign, s = "-", s[1:]
	}
	if len(s) <= 3 {
		return sign + s
	}

	var b strings.Builder
	b.Grow(len(sign) + len(s) + (len(s)-1)/3)
	b.WriteString(sign)

	// Width of the leading (possibly short) digit group.
	lead := len(s) % 3
	if lead == 0 {
		lead = 3
	}
	b.WriteString(s[:lead])
	for i := lead; i < len(s); i += 3 {
		b.WriteByte(',')
		b.WriteString(s[i : i+3])
	}
	return b.String()
}
+
// FormatFloat inserts thousands separators into the integer part of a decimal
// string, e.g. "902030185089.93" -> "902,030,185,089.93".
// Empty and placeholder values ("", "โ", "-") are returned unchanged.
//
// The sign is stripped before grouping: the previous implementation grouped
// the sign character along with the digits, so negatives whose digit count is
// a multiple of three rendered wrongly (e.g. "-123456" -> "-,123,456").
// It also preserves everything after the first '.' instead of silently
// discarding it when more than one dot is present.
func FormatFloat(s string) string {
	// Pass empty and placeholder values through untouched.
	if s == "" || s == "โ" || s == "-" {
		return s
	}

	// Separate an optional leading sign so grouping only sees digits.
	sign := ""
	rest := s
	if rest[0] == '-' || rest[0] == '+' {
		sign, rest = rest[:1], rest[1:]
	}

	// Split once into integer and (optional) fractional part.
	intPart, fracPart, hasFrac := strings.Cut(rest, ".")

	if len(intPart) > 3 {
		var b strings.Builder
		b.Grow(len(intPart) + (len(intPart)-1)/3)
		// Width of the leading (possibly short) digit group.
		lead := len(intPart) % 3
		if lead == 0 {
			lead = 3
		}
		b.WriteString(intPart[:lead])
		for i := lead; i < len(intPart); i += 3 {
			b.WriteByte(',')
			b.WriteString(intPart[i : i+3])
		}
		intPart = b.String()
	}

	if hasFrac {
		return sign + intPart + "." + fracPart
	}
	return sign + intPart
}
+
// Percent renders a fraction in [0,1] (NOT [0,100]) as a percentage string
// with up to five decimal places and trailing zeros removed:
// Percent(0.00123) -> "0.123%", Percent(0.123) -> "12.3%".
// Out-of-range input is clamped: negatives -> "0.0%", above one -> "100.0%".
func Percent(fraction float64) string {
	switch {
	case fraction < 0:
		return "0.0%"
	case fraction > 1:
		return "100.0%"
	}
	// Render at five decimals, then strip trailing zeros and a dangling dot.
	s := strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.5f", fraction*100), "0"), ".")
	return s + "%"
}
+
+// truncateWithEllipsis caps string length to prevent overflow in fixed-width cells
+func truncateWithEllipsis(s string, maxLen int) string {
+ if maxLen <= 0 {
+ return ""
+ }
+ if maxLen == 1 {
+ return "โฆ"
+ }
+ runes := []rune(s)
+ if len(runes) <= maxLen {
+ return s
+ }
+ return string(runes[:maxLen-1]) + "โฆ"
+}
+
// ProgressBar renders a horizontal progress bar. fraction is clamped to
// [0,1]. In ASCII mode (noEmoji) the bar is bracketed, e.g. "[====    ]";
// otherwise block glyphs fill the full width. Widths below 3 fall back to a
// bare percentage string.
func ProgressBar(fraction float64, width int, noEmoji bool) string {
	if fraction < 0 {
		fraction = 0
	} else if fraction > 1 {
		fraction = 1
	}
	if width < 3 {
		// Too narrow for a meaningful bar.
		return fmt.Sprintf("%.0f%%", fraction*100)
	}

	// ASCII mode spends two cells on the surrounding brackets.
	inner := width
	if noEmoji {
		inner = width - 2
	}

	filled := int(float64(inner) * fraction)
	if filled > inner {
		filled = inner
	}

	if noEmoji {
		return "[" + strings.Repeat("=", filled) + strings.Repeat(" ", inner-filled) + "]"
	}
	return strings.Repeat("โ", filled) + strings.Repeat("โ", inner-filled)
}
+
+// DurationShort formats duration concisely
+func DurationShort(d time.Duration) string {
+ if d < time.Minute {
+ return fmt.Sprintf("%ds", int(d.Seconds()))
+ }
+ if d < time.Hour {
+ return fmt.Sprintf("%dm", int(d.Minutes()))
+ }
+ if d < 24*time.Hour {
+ h := int(d.Hours())
+ m := int(d.Minutes()) % 60
+ if m == 0 {
+ return fmt.Sprintf("%dh", h)
+ }
+ return fmt.Sprintf("%dh%dm", h, m)
+ }
+ days := int(d.Hours()) / 24
+ h := int(d.Hours()) % 24
+ if h == 0 {
+ return fmt.Sprintf("%dd", days)
+ }
+ return fmt.Sprintf("%dd%dh", days, h)
+}
+
+// FormatTimestamp formats RFC3339 timestamp to human-readable format "MMM DD, HH:MM AM/PM TZ"
+// Converts to local timezone and includes timezone abbreviation
+// Returns empty string if parsing fails
+func FormatTimestamp(rfcTime string) string {
+ if rfcTime == "" {
+ return ""
+ }
+ t, err := time.Parse(time.RFC3339Nano, rfcTime)
+ if err != nil {
+ return ""
+ }
+ // Convert to local timezone and include timezone abbreviation with AM/PM
+ return t.Local().Format("Jan 02, 03:04 PM MST")
+}
+
+// TimeUntil calculates human-readable time remaining until a given RFC3339 timestamp
+// Returns empty string if timestamp is in the past or parsing fails
+func TimeUntil(rfcTime string) string {
+ if rfcTime == "" {
+ return ""
+ }
+ t, err := time.Parse(time.RFC3339Nano, rfcTime)
+ if err != nil {
+ return ""
+ }
+
+ remaining := time.Until(t)
+ if remaining <= 0 {
+ return "0s"
+ }
+
+ return DurationShort(remaining)
+}
+
+// ETACalculator maintains moving average for stable ETA
+type ETACalculator struct {
+ samples []struct {
+ blocksBehind int64
+ timestamp time.Time
+ }
+ maxSamples int
+ lastProgress time.Time
+}
+
+// NewETACalculator creates a new ETA calculator
+func NewETACalculator() *ETACalculator {
+ return &ETACalculator{maxSamples: 10}
+}
+
+// AddSample adds a new sample point
+func (e *ETACalculator) AddSample(blocksBehind int64) {
+ now := time.Now()
+ e.samples = append(e.samples, struct {
+ blocksBehind int64
+ timestamp time.Time
+ }{blocksBehind, now})
+
+ if len(e.samples) > e.maxSamples {
+ e.samples = e.samples[1:]
+ }
+
+ // Update last progress time if we have at least 2 samples and blocks decreased
+ if len(e.samples) >= 2 {
+ prev := e.samples[len(e.samples)-2].blocksBehind
+ if blocksBehind < prev {
+ e.lastProgress = now
+ }
+ }
+}
+
+// Calculate returns ETA as formatted string
+func (e *ETACalculator) Calculate() string {
+ // Need at least 2 samples to calculate rate
+ if len(e.samples) < 2 {
+ return "calculating..."
+ }
+
+ // Use most recent samples for better responsiveness
+ first := e.samples[0]
+ last := e.samples[len(e.samples)-1]
+
+ blocksDelta := first.blocksBehind - last.blocksBehind
+ timeDelta := last.timestamp.Sub(first.timestamp).Seconds()
+
+ // Need at least some time passed
+ if timeDelta < 0.1 {
+ return "calculating..."
+ }
+
+ // Check for stalled sync (no progress)
+ if blocksDelta <= 0 {
+ if !e.lastProgress.IsZero() && time.Since(e.lastProgress) > 30*time.Second {
+ return "stalled"
+ }
+ return "calculating..."
+ }
+
+ // Calculate sync rate (blocks/second)
+ rate := float64(blocksDelta) / timeDelta
+ if rate <= 0 {
+ return "calculating..."
+ }
+
+ // Calculate ETA: remaining blocks / rate
+ if last.blocksBehind <= 0 {
+ return "0s"
+ }
+
+ seconds := float64(last.blocksBehind) / rate
+ if seconds < 0 {
+ return "0s"
+ }
+ if seconds > 365*24*3600 { // Cap at 1 year
+ return ">1y"
+ }
+
+ return DurationShort(time.Duration(seconds * float64(time.Second)))
+}
+
// Icons bundles the status glyphs used across dashboard components, with an
// ASCII fallback set for terminals where emoji are unwanted.
type Icons struct {
	OK string
	Warn string
	Err string
	Peer string
	Block string
	Unknown string // Neutral icon for unknown/indeterminate states
}

// NewIcons returns the ASCII glyph set when noEmoji is true, otherwise emoji.
func NewIcons(noEmoji bool) Icons {
	if noEmoji {
		return Icons{OK: "[OK]", Warn: "[!]", Err: "[X]", Peer: "#", Block: "#", Unknown: "[?]"}
	}
	return Icons{OK: "โ", Warn: "โ ", Err: "โ", Peer: "๐", Block: "๐ฆ", Unknown: "โฏ"}
}
+
// reverse returns s with its runes in reverse order (Unicode-safe).
func reverse(s string) string {
	src := []rune(s)
	out := make([]rune, len(src))
	for i, r := range src {
		out[len(src)-1-i] = r
	}
	return string(out)
}
+
// joinLines concatenates lines with sep between consecutive elements.
// It delegates to strings.Join, which implements exactly the hand-rolled
// strings.Builder loop this helper previously contained (and pre-sizes the
// buffer, which the manual loop did not).
func joinLines(lines []string, sep string) string {
	return strings.Join(lines, sep)
}
+
// innerWidthForBox computes the usable content width inside a framed box.
// total is the width allocated by the layout; hasBorder reserves one column
// per border edge (2 total); padLeftRight is subtracted from both sides.
// The result is floored at 1 so callers never receive a non-positive width.
func innerWidthForBox(total int, hasBorder bool, padLeftRight int) int {
	w := total - 2*padLeftRight
	if hasBorder {
		w -= 2 // left + right border columns
	}
	if w < 1 {
		return 1
	}
	return w
}
+
// FormatTitle renders a component title: uppercased, bold, bright-cyan
// (ANSI-256 color 39), and centered within the given width.
func FormatTitle(title string, width int) string {
	title = strings.ToUpper(title)
	style := lipgloss.NewStyle().
		Bold(true).
		Foreground(lipgloss.Color("39")). // Bright cyan
		Width(width).
		Align(lipgloss.Center)
	return style.Render(title)
}
diff --git a/push-validator-manager/internal/dashboard/validator_info.go b/push-validator-manager/internal/dashboard/validator_info.go
new file mode 100644
index 00000000..3df0be60
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/validator_info.go
@@ -0,0 +1,301 @@
+package dashboard
+
+import (
+ "fmt"
+ "time"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+)
+
// ValidatorInfo is the dashboard panel showing this node's validator status:
// registration state, voting power, commission, rewards, and jail details.
type ValidatorInfo struct {
	BaseComponent
	data DashboardData // latest snapshot, replaced on every Update
	icons Icons // emoji or ASCII glyph set, fixed at construction
}

// NewValidatorInfo creates a new validator info component; noEmoji selects
// the ASCII icon set.
func NewValidatorInfo(noEmoji bool) *ValidatorInfo {
	return &ValidatorInfo{
		BaseComponent: BaseComponent{},
		icons: NewIcons(noEmoji),
	}
}
+
// ID returns the stable component identifier used by the layout engine.
func (c *ValidatorInfo) ID() string {
	return "validator_info"
}

// Title returns the human-readable panel title.
func (c *ValidatorInfo) Title() string {
	return "My Validator Status"
}

// MinWidth returns the minimum width (in cells) this panel needs.
func (c *ValidatorInfo) MinWidth() int {
	return 30
}

// MinHeight returns the minimum height (in rows) this panel needs.
func (c *ValidatorInfo) MinHeight() int {
	return 10
}

// Update stores the latest dashboard snapshot; this component never issues
// follow-up commands.
func (c *ValidatorInfo) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
	c.data = data
	return c, nil
}
+
+// View renders the component with caching
+func (c *ValidatorInfo) View(w, h int) string {
+ // Render with styling
+ style := lipgloss.NewStyle().
+ Border(lipgloss.RoundedBorder()).
+ BorderForeground(lipgloss.Color("63")).
+ Padding(0, 1)
+
+ content := c.renderContent(w)
+
+ // Check cache
+ if c.CheckCacheWithSize(content, w, h) {
+ return c.GetCached()
+ }
+
+ if w < 0 {
+ w = 0
+ }
+ if h < 0 {
+ h = 0
+ }
+
+ // Account for border width (2 chars: left + right) to prevent overflow
+ borderWidth := 2
+ contentWidth := w - borderWidth
+ if contentWidth < 0 {
+ contentWidth = 0
+ }
+
+ rendered := style.Width(contentWidth).Render(content)
+ c.UpdateCache(rendered)
+ return rendered
+}
+
// renderContent builds the plain-text body for the panel. Four cases, in
// priority order: (1) a validator with this node's moniker exists but runs
// under a different consensus key, (2) the node is not registered (with or
// without a moniker conflict), (3) registered and jailed (two-column layout
// with jail details on the right), (4) registered and healthy (one column).
func (c *ValidatorInfo) renderContent(w int) string {
	// Interior width after accounting for rounded border (2 chars) and padding (2 chars).
	inner := w - 4
	if inner < 0 {
		inner = 0
	}

	// Check if validator exists by moniker but consensus pubkey doesn't match
	// (may have been created with a different key/node) - check this FIRST before other IsValidator checks
	if !c.data.MyValidator.IsValidator && c.data.MyValidator.Moniker != "" && c.data.MyValidator.Status != "" {
		var lines []string

		// Warn that consensus pubkey doesn't match
		lines = append(lines, fmt.Sprintf("%s Validator found by moniker", c.icons.Warn))
		lines = append(lines, "but running with different key/node")
		lines = append(lines, "")

		// Show the actual validator status
		statusIcon := c.icons.OK
		if c.data.MyValidator.Jailed {
			statusIcon = c.icons.Err
		} else if c.data.MyValidator.Status == "UNBONDING" || c.data.MyValidator.Status == "UNBONDED" {
			statusIcon = c.icons.Warn
		}
		lines = append(lines, fmt.Sprintf("%s Status: %s", statusIcon, c.data.MyValidator.Status))

		// Show voting power (with network share when known)
		vpText := HumanInt(c.data.MyValidator.VotingPower)
		if c.data.MyValidator.VotingPct > 0 {
			vpText += fmt.Sprintf(" (%s)", Percent(c.data.MyValidator.VotingPct))
		}
		lines = append(lines, fmt.Sprintf("Power: %s", vpText))

		// Show commission if available
		if c.data.MyValidator.Commission != "" {
			lines = append(lines, fmt.Sprintf("Commission: %s", c.data.MyValidator.Commission))
		}

		// Show jailed status with reason if applicable
		if c.data.MyValidator.Jailed {
			jailReason := c.data.MyValidator.SlashingInfo.JailReason
			if jailReason == "" {
				jailReason = "Unknown"
			}
			lines = append(lines, "")
			lines = append(lines, fmt.Sprintf("%s Jailed: %s", c.icons.Err, jailReason))
		}

		lines = append(lines, "")
		lines = append(lines, "To control this validator, run:")
		lines = append(lines, "push-validator register")

		return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
	}

	// Check if this node is a validator
	if !c.data.MyValidator.IsValidator {
		// Check for moniker conflict
		if c.data.MyValidator.ValidatorExistsWithSameMoniker {
			return fmt.Sprintf("%s\n\n%s Not registered\n\n%s Moniker conflict detected!\nA different validator is using\nmoniker '%s'\n\nUse a different moniker to register:\npush-validator register",
				FormatTitle(c.Title(), inner),
				c.icons.Warn,
				c.icons.Err,
				truncateWithEllipsis(c.data.MyValidator.ConflictingMoniker, 20))
		}

		return fmt.Sprintf("%s\n\n%s Not registered as validator\n\nTo register, run:\npush-validator register", FormatTitle(c.Title(), inner), c.icons.Warn)
	}

	// Build left column (always shown for registered validators)
	var leftLines []string

	// Moniker
	if c.data.MyValidator.Moniker != "" {
		leftLines = append(leftLines, fmt.Sprintf("Moniker: %s", truncateWithEllipsis(c.data.MyValidator.Moniker, 22)))
	}

	// Status
	statusIcon := c.icons.OK
	if c.data.MyValidator.Jailed {
		statusIcon = c.icons.Err
	} else if c.data.MyValidator.Status == "UNBONDING" || c.data.MyValidator.Status == "UNBONDED" {
		statusIcon = c.icons.Warn
	}
	leftLines = append(leftLines, fmt.Sprintf("%s Status: %s", statusIcon, c.data.MyValidator.Status))

	// Voting Power
	vpText := HumanInt(c.data.MyValidator.VotingPower)
	if c.data.MyValidator.VotingPct > 0 {
		vpText += fmt.Sprintf(" (%s)", Percent(c.data.MyValidator.VotingPct))
	}
	leftLines = append(leftLines, fmt.Sprintf("Power: %s", vpText))

	// Commission
	if c.data.MyValidator.Commission != "" {
		leftLines = append(leftLines, fmt.Sprintf("Commission: %s", c.data.MyValidator.Commission))
	}

	// Commission Rewards (placeholder value means not fetched yet)
	if c.data.MyValidator.CommissionRewards != "" && c.data.MyValidator.CommissionRewards != "โ" {
		leftLines = append(leftLines, fmt.Sprintf("Commission Rewards: %s PC", FormatFloat(c.data.MyValidator.CommissionRewards)))
	}

	// Outstanding Rewards (placeholder value means not fetched yet)
	if c.data.MyValidator.OutstandingRewards != "" && c.data.MyValidator.OutstandingRewards != "โ" {
		leftLines = append(leftLines, fmt.Sprintf("Outstanding Rewards: %s PC", FormatFloat(c.data.MyValidator.OutstandingRewards)))
	}

	// Check if validator has any rewards to withdraw
	hasCommRewards := c.data.MyValidator.CommissionRewards != "" &&
		c.data.MyValidator.CommissionRewards != "โ" &&
		c.data.MyValidator.CommissionRewards != "0"
	hasOutRewards := c.data.MyValidator.OutstandingRewards != "" &&
		c.data.MyValidator.OutstandingRewards != "โ" &&
		c.data.MyValidator.OutstandingRewards != "0"

	if hasCommRewards || hasOutRewards {
		leftLines = append(leftLines, "")
		withdrawStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true)
		leftLines = append(leftLines, withdrawStyle.Render("Rewards available!"))
		leftLines = append(leftLines, withdrawStyle.Render("Run: push-validator restake"))
		leftLines = append(leftLines, withdrawStyle.Render("Run: push-validator withdraw-rewards"))
	}

	// If jailed, create two-column layout with jail details on the right
	if c.data.MyValidator.Jailed {
		var rightLines []string

		// Right column header
		rightLines = append(rightLines, "STATUS DETAILS")
		rightLines = append(rightLines, "")

		// Add status with jail indicator
		statusText := c.data.MyValidator.Status
		if c.data.MyValidator.Jailed {
			statusText = fmt.Sprintf("%s (JAILED)", c.data.MyValidator.Status)
		}
		rightLines = append(rightLines, statusText)
		rightLines = append(rightLines, "")

		// Jail Reason
		jailReason := c.data.MyValidator.SlashingInfo.JailReason
		if jailReason == "" {
			jailReason = "Unknown"
		}
		rightLines = append(rightLines, fmt.Sprintf("Reason: %s", jailReason))

		// Missed Blocks
		if c.data.MyValidator.SlashingInfo.MissedBlocks > 0 {
			rightLines = append(rightLines, fmt.Sprintf("Missed: %s blks", HumanInt(c.data.MyValidator.SlashingInfo.MissedBlocks)))
		}

		// Tombstoned Status (permanent jail)
		if c.data.MyValidator.SlashingInfo.Tombstoned {
			rightLines = append(rightLines, fmt.Sprintf("%s Tombstoned: Yes", c.icons.Err))
		}

		// Jailed Until Time
		if c.data.MyValidator.SlashingInfo.JailedUntil != "" {
			formatted := FormatTimestamp(c.data.MyValidator.SlashingInfo.JailedUntil)
			if formatted != "" {
				rightLines = append(rightLines, fmt.Sprintf("Until: %s", formatted))
			}

			// Time remaining
			timeLeft := TimeUntil(c.data.MyValidator.SlashingInfo.JailedUntil)
			if timeLeft != "" && timeLeft != "0s" {
				rightLines = append(rightLines, fmt.Sprintf("Remaining: %s", timeLeft))
			}

			// Check if jail period has expired
			if parseTimeExpired(c.data.MyValidator.SlashingInfo.JailedUntil) {
				rightLines = append(rightLines, "")
				unjailStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("205")).Bold(true)
				rightLines = append(rightLines, unjailStyle.Render(fmt.Sprintf("%s Ready to unjail!", c.icons.OK)))
				rightLines = append(rightLines, unjailStyle.Render("Run: push-validator unjail"))
			}
		}

		// Create two-column layout
		leftContent := joinLines(leftLines, "\n")
		rightContent := joinLines(rightLines, "\n")

		// Calculate column widths (simple split)
		midWidth := inner / 2
		leftWidth := midWidth
		rightWidth := inner - midWidth - 2 // Account for spacing

		leftStyle := lipgloss.NewStyle().Width(leftWidth)
		rightStyle := lipgloss.NewStyle().Width(rightWidth)

		leftRendered := leftStyle.Render(leftContent)
		rightRendered := rightStyle.Render(rightContent)

		twoColumnContent := lipgloss.JoinHorizontal(lipgloss.Top, leftRendered, "  ", rightRendered)

		return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), twoColumnContent)
	}

	// Single column layout for non-jailed validators
	lines := leftLines
	return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
}
+
// parseTimeExpired reports whether the RFC3339 timestamp lies in the past.
// Empty or unparsable input is treated as not expired.
func parseTimeExpired(timeStr string) bool {
	if timeStr == "" {
		return false
	}
	if t, err := time.Parse(time.RFC3339Nano, timeStr); err == nil {
		return time.Now().After(t)
	}
	return false
}
diff --git a/push-validator-manager/internal/dashboard/validators_list.go b/push-validator-manager/internal/dashboard/validators_list.go
new file mode 100644
index 00000000..50df3927
--- /dev/null
+++ b/push-validator-manager/internal/dashboard/validators_list.go
@@ -0,0 +1,530 @@
+package dashboard
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ tea "github.com/charmbracelet/bubbletea"
+ "github.com/charmbracelet/lipgloss"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/validator"
+)
+
// rewardsFetchedMsg indicates rewards have been fetched and the cache
// updated; receiving it clears the in-flight flag and triggers a re-render.
type rewardsFetchedMsg struct{}

// ValidatorsList is the dashboard panel listing all network validators with
// pagination, a Cosmos/EVM address toggle, and lazily fetched reward columns.
type ValidatorsList struct {
	BaseComponent
	data DashboardData // latest snapshot, replaced on every Update
	icons Icons // emoji or ASCII glyph set
	cfg config.Config // Config for RPC queries
	currentPage int // Current page (0-based)
	pageSize int // Items per page
	myValidatorAddress string // Address of current node's validator (if any)
	showEVMAddress bool // Toggle between Cosmos and EVM address display
	evmAddressCache map[string]string // Cache for fetched EVM addresses
	fetchingEVMCache bool // Flag to prevent duplicate concurrent fetches
	rewardsCache map[string]struct {
		Commission string
		Outstanding string
	} // Cache for fetched rewards (TTL handled by central fetcher)
	rewardsCacheMu sync.Mutex // Protect rewardsCache (written from fetch goroutines)
	fetchingRewards bool // Flag to prevent duplicate concurrent fetches
	sortedValidators []struct {
		Moniker string
		Status string
		VotingPower int64
		Commission string
		CommissionRewards string
		OutstandingRewards string
		Address string
		EVMAddress string
		Jailed bool
	} // Sorted validators array shared between render and fetch
}
+
+// NewValidatorsList creates a new validators list component
+func NewValidatorsList(noEmoji bool, cfg config.Config) *ValidatorsList {
+ return &ValidatorsList{
+ BaseComponent: BaseComponent{},
+ icons: NewIcons(noEmoji),
+ cfg: cfg,
+ currentPage: 0,
+ pageSize: 5,
+ showEVMAddress: true, // Set EVM as default address display
+ evmAddressCache: make(map[string]string),
+ rewardsCache: make(map[string]struct{ Commission string; Outstanding string }),
+ }
+}
+
// ID returns the stable component identifier used by the layout engine.
func (c *ValidatorsList) ID() string {
	return "validators_list"
}

// Title returns the panel title, including "(Page x/y)" when the validator
// set spans more than one page.
func (c *ValidatorsList) Title() string {
	totalValidators := len(c.data.NetworkValidators.Validators)
	if totalValidators == 0 {
		return "Network Validators"
	}

	// Ceiling division: number of pages needed to show all validators.
	totalPages := (totalValidators + c.pageSize - 1) / c.pageSize
	if totalPages > 1 {
		return fmt.Sprintf("Network Validators (Page %d/%d)", c.currentPage+1, totalPages)
	}
	return "Network Validators"
}

// MinWidth returns the minimum width (in cells) this panel needs.
func (c *ValidatorsList) MinWidth() int {
	return 30
}

// MinHeight returns the minimum height (in rows) this panel needs.
func (c *ValidatorsList) MinHeight() int {
	return 16
}
+
+// Update receives dashboard data
+func (c *ValidatorsList) Update(msg tea.Msg, data DashboardData) (Component, tea.Cmd) {
+ oldValidatorCount := len(c.data.NetworkValidators.Validators)
+ c.data = data
+ oldAddr := c.myValidatorAddress
+ c.myValidatorAddress = data.MyValidator.Address
+ _ = oldAddr // Suppress unused variable warning
+
+ // Update sorted validators array whenever data changes
+ if len(c.data.NetworkValidators.Validators) > 0 {
+ c.sortedValidators = c.getSortedValidators()
+
+ // Trigger initial rewards fetch for first page on first data load
+ if oldValidatorCount == 0 && !c.fetchingRewards && len(c.rewardsCache) == 0 {
+ c.fetchingRewards = true
+
+ // Also trigger initial EVM address fetch if showEVMAddress is true on first data load
+ if c.showEVMAddress && len(c.evmAddressCache) == 0 && !c.fetchingEVMCache {
+ c.fetchingEVMCache = true
+ return c, tea.Batch(c.fetchPageRewardsCmd(), c.fetchEVMAddressesCmd())
+ }
+
+ return c, c.fetchPageRewardsCmd()
+ }
+ }
+
+ switch msg := msg.(type) {
+ case tea.KeyMsg:
+ return c.handleKey(msg)
+ case rewardsFetchedMsg:
+ // Rewards have been fetched, trigger re-render
+ c.fetchingRewards = false
+ return c, nil
+ }
+
+ return c, nil
+}
+
// getSortedValidators returns a copy of the validator set ordered for
// display: this node's validator first, then by status (BONDED < UNBONDING <
// UNBONDED < anything else), then by voting power descending within a status.
func (c *ValidatorsList) getSortedValidators() []struct {
	Moniker string
	Status string
	VotingPower int64
	Commission string
	CommissionRewards string
	OutstandingRewards string
	Address string
	EVMAddress string
	Jailed bool
} {
	// Sort a copy so the shared dashboard data is never mutated.
	validators := make([]struct {
		Moniker string
		Status string
		VotingPower int64
		Commission string
		CommissionRewards string
		OutstandingRewards string
		Address string
		EVMAddress string
		Jailed bool
	}, len(c.data.NetworkValidators.Validators))
	copy(validators, c.data.NetworkValidators.Validators)

	// Helper to get status sort order (lower sorts first).
	statusOrder := func(status string) int {
		switch status {
		case "BONDED":
			return 1
		case "UNBONDING":
			return 2
		case "UNBONDED":
			return 3
		default:
			return 4
		}
	}

	sort.Slice(validators, func(i, j int) bool {
		// My validator always comes first
		iIsOurs := validators[i].Address == c.myValidatorAddress
		jIsOurs := validators[j].Address == c.myValidatorAddress
		if iIsOurs != jIsOurs {
			return iIsOurs // True (our validator) sorts before false
		}

		// Sort by status first (BONDED < UNBONDING < UNBONDED)
		iOrder := statusOrder(validators[i].Status)
		jOrder := statusOrder(validators[j].Status)
		if iOrder != jOrder {
			return iOrder < jOrder
		}
		// Within same status, sort by voting power (highest first)
		return validators[i].VotingPower > validators[j].VotingPower
	})

	return validators
}
+
+// handleKey processes keyboard input for pagination and toggles
+func (c *ValidatorsList) handleKey(msg tea.KeyMsg) (Component, tea.Cmd) {
+ totalValidators := len(c.data.NetworkValidators.Validators)
+
+ switch msg.String() {
+ case "e":
+ // Toggle between Cosmos and EVM address display
+ c.showEVMAddress = !c.showEVMAddress
+ // If toggling to EVM and cache is empty, start fetching addresses
+ if c.showEVMAddress && len(c.evmAddressCache) == 0 && !c.fetchingEVMCache {
+ return c, c.fetchEVMAddressesCmd()
+ }
+ return c, nil
+ }
+
+ if totalValidators == 0 {
+ return c, nil
+ }
+
+ totalPages := (totalValidators + c.pageSize - 1) / c.pageSize
+
+ switch msg.String() {
+ case "left", "p":
+ // Previous page
+ if c.currentPage > 0 {
+ c.currentPage--
+ c.fetchingRewards = true // Set flag before fetch
+ return c, c.fetchPageRewardsCmd() // Trigger rewards fetch for new page
+ }
+ case "right", "n":
+ // Next page
+ if c.currentPage < totalPages-1 {
+ c.currentPage++
+ c.fetchingRewards = true // Set flag before fetch
+ return c, c.fetchPageRewardsCmd() // Trigger rewards fetch for new page
+ }
+ }
+
+ return c, nil
+}
+
// View renders the framed panel, serving a cached render when neither the
// content nor the requested dimensions changed since the last call.
func (c *ValidatorsList) View(w, h int) string {
	// Render with styling
	style := lipgloss.NewStyle().
		Border(lipgloss.RoundedBorder()).
		BorderForeground(lipgloss.Color("63")).
		Padding(0, 1)

	// Content is built first because the cache key includes it.
	content := c.renderContent(w)

	// Cache hit: identical content at identical size.
	if c.CheckCacheWithSize(content, w, h) {
		return c.GetCached()
	}

	// Clamp negative dimensions from degenerate layouts.
	if w < 0 {
		w = 0
	}
	if h < 0 {
		h = 0
	}

	// Account for border width (2 chars: left + right) to prevent overflow
	borderWidth := 2
	contentWidth := w - borderWidth
	if contentWidth < 0 {
		contentWidth = 0
	}

	rendered := style.Width(contentWidth).Render(content)
	c.UpdateCache(rendered)
	return rendered
}
+
+// renderContent builds plain text content
+func (c *ValidatorsList) renderContent(w int) string {
+ var lines []string
+
+ // Interior width after accounting for rounded border (2 chars) and padding (2 chars).
+ inner := w - 4
+ if inner < 0 {
+ inner = 0
+ }
+
+ // Check if validator data is available
+ if c.data.NetworkValidators.Total == 0 {
+ return fmt.Sprintf("%s\n\n%s Loading validators...", FormatTitle(c.Title(), inner), c.icons.Warn)
+ }
+
+ // Use pre-sorted validators from Update()
+ // This ensures fetch and render use the same validator order
+ validators := c.sortedValidators
+ if len(validators) == 0 {
+ return fmt.Sprintf("%s\n\n%s Loading validators...", FormatTitle(c.Title(), inner), c.icons.Warn)
+ }
+
+ // Table header - show different label based on toggle
+ addressLabel := "ADDRESS"
+ if c.showEVMAddress {
+ addressLabel = "ADDRESS (EVM)"
+ } else {
+ addressLabel = "ADDRESS (COSMOS)"
+ }
+ headerLine := fmt.Sprintf("%-40s %-24s %-9s %-11s %-18s %-18s %s", "NODE NAME", "STATUS", "STAKE(PC)", "COMMISSION%", "COMMISSION REWARDS", "OUTSTANDING REWARDS", addressLabel)
+ lines = append(lines, headerLine)
+ // Create separator line that spans full component width
+ lines = append(lines, strings.Repeat("โ", inner))
+
+ // Calculate pagination bounds
+ totalValidators := len(validators)
+ startIdx := c.currentPage * c.pageSize
+ endIdx := startIdx + c.pageSize
+ if endIdx > totalValidators {
+ endIdx = totalValidators
+ }
+
+ // Bounds check
+ if startIdx >= totalValidators {
+ startIdx = 0
+ c.currentPage = 0
+ endIdx = c.pageSize
+ if endIdx > totalValidators {
+ endIdx = totalValidators
+ }
+ }
+
+ // Display validators for current page (always show pageSize rows for consistent layout)
+ for row := 0; row < c.pageSize; row++ {
+ i := startIdx + row
+
+ // If we have a validator at this position, render it
+ if i < endIdx && i < totalValidators {
+ v := validators[i]
+
+ // Check if this is our validator
+ isOurValidator := c.myValidatorAddress != "" && v.Address == c.myValidatorAddress
+
+ // Show full moniker with indicator if our validator
+ moniker := v.Moniker
+ if isOurValidator {
+ moniker = moniker + " [My Validator]"
+ }
+ // Truncate if still too long (40 chars max for display)
+ moniker = truncateWithEllipsis(moniker, 40)
+
+ // Show full status with jail indicator
+ status := v.Status
+ if v.Jailed && (v.Status == "UNBONDING" || v.Status == "UNBONDED") {
+ status = status + " (JAILED)"
+ }
+
+ // Format voting power (compact display)
+ powerStr := fmt.Sprintf("%s", HumanInt(v.VotingPower))
+
+ // Commission percentage (already formatted from staking query)
+ commission := v.Commission
+ if len(commission) > 5 {
+ commission = commission[:5]
+ }
+
+ // Commission rewards (fetched on-demand with 30s cache)
+ commRewards := v.CommissionRewards
+ c.rewardsCacheMu.Lock()
+ if cached, exists := c.rewardsCache[v.Address]; exists {
+ commRewards = cached.Commission
+ }
+ c.rewardsCacheMu.Unlock()
+ if commRewards == "" {
+ commRewards = "โ"
+ }
+
+ // Outstanding rewards (fetched on-demand with 30s cache)
+ outRewards := v.OutstandingRewards
+ c.rewardsCacheMu.Lock()
+ if cached, exists := c.rewardsCache[v.Address]; exists {
+ outRewards = cached.Outstanding
+ }
+ c.rewardsCacheMu.Unlock()
+ if outRewards == "" {
+ outRewards = "โ"
+ }
+
+ // Select address based on toggle
+ address := v.Address
+ if c.showEVMAddress {
+ // Try to get from cache first, fallback to data, then placeholder
+ cachedAddr := c.getEVMAddressFromCache(v.Address)
+ if cachedAddr != "" {
+ address = cachedAddr
+ } else if v.EVMAddress != "" {
+ address = v.EVMAddress
+ } else {
+ address = "โ"
+ }
+ }
+
+ // Build row with flexible-width columns
+ line := fmt.Sprintf("%-40s %-24s %-9s %-11s %-18s %-18s %s",
+ moniker, status, powerStr, commission, FormatFloat(commRewards), FormatFloat(outRewards), address)
+
+ // Apply highlighting to own validator rows
+ if isOurValidator {
+ // Light green for own validator
+ highlightStyle := lipgloss.NewStyle().
+ Foreground(lipgloss.Color("10")) // Light green
+ line = highlightStyle.Render(line)
+ }
+
+ lines = append(lines, line)
+ } else {
+ // Add empty line for padding to maintain consistent table height
+ lines = append(lines, "")
+ }
+ }
+
+ lines = append(lines, "")
+
+ // Add pagination footer with toggle info
+ totalPages := (totalValidators + c.pageSize - 1) / c.pageSize
+ var footer string
+ if totalPages > 1 {
+ footer = fmt.Sprintf("โ / โ: change page | e: toggle EVM/Cosmos | Total: %d validators", c.data.NetworkValidators.Total)
+ } else {
+ footer = fmt.Sprintf("e: toggle EVM/Cosmos | Total: %d validators", c.data.NetworkValidators.Total)
+ }
+ lines = append(lines, lipgloss.NewStyle().Foreground(lipgloss.Color("241")).Render(footer))
+
+ return fmt.Sprintf("%s\n%s", FormatTitle(c.Title(), inner), joinLines(lines, "\n"))
+}
+
+// fetchEVMAddressesCmd returns a command to fetch EVM addresses in background
+func (c *ValidatorsList) fetchEVMAddressesCmd() tea.Cmd {
+ return func() tea.Msg {
+ c.fetchingEVMCache = true
+ defer func() { c.fetchingEVMCache = false }()
+
+ // Fetch EVM addresses for all validators with generous timeout
+ for _, v := range c.data.NetworkValidators.Validators {
+ if v.Address == "" {
+ continue
+ }
+ // Check cache first
+ if _, exists := c.evmAddressCache[v.Address]; exists {
+ continue
+ }
+ // Fetch with a timeout per address (3 seconds each)
+ evmAddr := validator.GetEVMAddress(context.Background(), v.Address)
+ c.evmAddressCache[v.Address] = evmAddr
+ }
+ return nil
+ }
+}
+
+// getEVMAddressFromCache returns cached EVM address or empty string
+func (c *ValidatorsList) getEVMAddressFromCache(address string) string {
+ if addr, exists := c.evmAddressCache[address]; exists {
+ return addr
+ }
+ return ""
+}
+
+// getStatusIcon returns appropriate icon for validator status
+func (c *ValidatorsList) getStatusIcon(status string) string {
+ switch status {
+ case "BONDED":
+ return c.icons.OK
+ case "UNBONDING":
+ return c.icons.Warn
+ case "UNBONDED":
+ return c.icons.Err
+ default:
+ return c.icons.Warn
+ }
+}
+
// fetchPageRewardsCmd returns a command to fetch rewards for the current page
// in parallel. Results land in rewardsCache (guarded by rewardsCacheMu) and a
// rewardsFetchedMsg is emitted once every in-flight lookup has finished.
func (c *ValidatorsList) fetchPageRewardsCmd() tea.Cmd {
	return func() tea.Msg {
		// Get validators for current page from SORTED array (same order as render)
		totalValidators := len(c.sortedValidators)
		startIdx := c.currentPage * c.pageSize
		endIdx := startIdx + c.pageSize
		if endIdx > totalValidators {
			endIdx = totalValidators
		}

		// Page out of range: nothing to fetch (render resets the page itself).
		if startIdx >= totalValidators {
			return nil
		}

		// Fetch rewards in parallel using goroutines
		var wg sync.WaitGroup
		for i := startIdx; i < endIdx; i++ {
			v := c.sortedValidators[i]
			if v.Address == "" {
				continue
			}

			// Skip if already cached
			c.rewardsCacheMu.Lock()
			_, exists := c.rewardsCache[v.Address]
			c.rewardsCacheMu.Unlock()
			if exists {
				continue
			}

			wg.Add(1)
			// Address is passed as an argument so each goroutine captures its
			// own copy (safe regardless of Go loop-variable semantics).
			go func(addr string) {
				defer wg.Done()

				// Fetch with a 15-second timeout per validator
				// Increased from 5s to handle network latency and slower nodes
				ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
				defer cancel()

				if commRwd, outRwd, err := validator.GetValidatorRewards(ctx, c.cfg, addr); err == nil {
					c.rewardsCacheMu.Lock()
					c.rewardsCache[addr] = struct {
						Commission  string
						Outstanding string
					}{
						Commission:  commRwd,
						Outstanding: outRwd,
					}
					c.rewardsCacheMu.Unlock()
				} else {
					// Log fetch errors for debugging
					// NOTE(review): writing to stderr while the Bubble Tea TUI
					// owns the terminal may garble the display — consider a
					// log file sink; confirm with the dashboard maintainers.
					fmt.Fprintf(os.Stderr, "Failed to fetch rewards for validator %s: %v\n", addr, err)
				}
			}(v.Address)
		}

		// Block until all page lookups complete, then notify the UI to refresh.
		wg.Wait()
		return rewardsFetchedMsg{}
	}
}
+
diff --git a/push-validator-manager/internal/exitcodes/codes.go b/push-validator-manager/internal/exitcodes/codes.go
new file mode 100644
index 00000000..7748f1e7
--- /dev/null
+++ b/push-validator-manager/internal/exitcodes/codes.go
@@ -0,0 +1,71 @@
+package exitcodes
+
import (
	"errors"
	"fmt"
	"os"
)
+
// Standard exit codes for push-validator-manager
const (
	// Success indicates successful command completion
	Success = 0

	// GeneralError indicates a general/unknown error
	GeneralError = 1

	// InvalidArgs indicates invalid command-line arguments or flags
	InvalidArgs = 2

	// PreconditionFailed indicates a precondition was not met
	// (e.g., node not initialized, missing config, already running)
	PreconditionFailed = 3

	// NetworkError indicates network/connectivity failure
	// (e.g., RPC unreachable, timeout, DNS failure)
	NetworkError = 4

	// ProcessError indicates process management failure
	// (e.g., failed to start/stop, permission denied)
	ProcessError = 5

	// SyncStuck indicates the sync monitor detected no progress and timed out.
	// Deliberately outside the 1-6 range — presumably so wrapper scripts can
	// match it unambiguously; confirm before renumbering.
	SyncStuck = 42

	// ValidationError indicates validation failure
	// (e.g., invalid config, corrupted data)
	ValidationError = 6
)
+
// Exit terminates the program immediately with the given code. Deferred
// functions do not run (os.Exit semantics).
func Exit(code int) {
	os.Exit(code)
}

// ExitWithError prints the error message to stderr, then exits with the
// given code.
func ExitWithError(code int, msg string) {
	fmt.Fprintf(os.Stderr, "%s\n", msg)
	os.Exit(code)
}

// ExitWithErrorf prints a formatted error message to stderr, then exits
// with the given code.
func ExitWithErrorf(code int, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(code)
}
+
+// CodeForError returns the appropriate exit code for an error.
+// Unwraps ErrorWithCode for explicit codes, otherwise returns GeneralError.
+// Use explicit error constructors (NetworkErr, ProcessErr, etc.) for specific codes.
+func CodeForError(err error) int {
+ if err == nil {
+ return Success
+ }
+
+ // Check if error has explicit code
+ if ec, ok := err.(*ErrorWithCode); ok {
+ return ec.Code
+ }
+
+ // Default to general error - callers should use explicit error constructors
+ return GeneralError
+}
diff --git a/push-validator-manager/internal/exitcodes/errors.go b/push-validator-manager/internal/exitcodes/errors.go
new file mode 100644
index 00000000..60b026d3
--- /dev/null
+++ b/push-validator-manager/internal/exitcodes/errors.go
@@ -0,0 +1,78 @@
+package exitcodes
+
+import "fmt"
+
// ErrorWithCode is an error that carries an explicit process exit code.
type ErrorWithCode struct {
	Code    int    // exit code to use when this error reaches main
	Message string // human-readable description
	Cause   error  // optional wrapped error; nil when not wrapping
}

// Error renders "Message" on its own, or "Message: Cause" when a cause is
// attached.
func (e *ErrorWithCode) Error() string {
	msg := e.Message
	if e.Cause != nil {
		msg = fmt.Sprintf("%s: %v", msg, e.Cause)
	}
	return msg
}

// Unwrap exposes the cause so errors.Is/errors.As can walk the chain.
func (e *ErrorWithCode) Unwrap() error {
	return e.Cause
}
+
+// NewError creates an error with an explicit exit code
+func NewError(code int, message string) *ErrorWithCode {
+ return &ErrorWithCode{Code: code, Message: message}
+}
+
+// NewErrorf creates an error with formatted message and exit code
+func NewErrorf(code int, format string, args ...interface{}) *ErrorWithCode {
+ return &ErrorWithCode{Code: code, Message: fmt.Sprintf(format, args...)}
+}
+
+// WrapError wraps an existing error with an exit code
+func WrapError(code int, message string, cause error) *ErrorWithCode {
+ return &ErrorWithCode{Code: code, Message: message, Cause: cause}
+}
+
// Common error constructors

// InvalidArgsError builds an error carrying exit code InvalidArgs.
func InvalidArgsError(message string) *ErrorWithCode {
	return NewError(InvalidArgs, message)
}

// InvalidArgsErrorf is the formatted variant of InvalidArgsError.
func InvalidArgsErrorf(format string, args ...interface{}) *ErrorWithCode {
	return NewErrorf(InvalidArgs, format, args...)
}

// PreconditionError builds an error carrying exit code PreconditionFailed.
func PreconditionError(message string) *ErrorWithCode {
	return NewError(PreconditionFailed, message)
}

// PreconditionErrorf is the formatted variant of PreconditionError.
func PreconditionErrorf(format string, args ...interface{}) *ErrorWithCode {
	return NewErrorf(PreconditionFailed, format, args...)
}

// NetworkErr builds an error carrying exit code NetworkError.
func NetworkErr(message string) *ErrorWithCode {
	return NewError(NetworkError, message)
}

// NetworkErrf is the formatted variant of NetworkErr.
func NetworkErrf(format string, args ...interface{}) *ErrorWithCode {
	return NewErrorf(NetworkError, format, args...)
}

// ProcessErr builds an error carrying exit code ProcessError.
func ProcessErr(message string) *ErrorWithCode {
	return NewError(ProcessError, message)
}

// ProcessErrf is the formatted variant of ProcessErr.
func ProcessErrf(format string, args ...interface{}) *ErrorWithCode {
	return NewErrorf(ProcessError, format, args...)
}

// ValidationErr builds an error carrying exit code ValidationError.
func ValidationErr(message string) *ErrorWithCode {
	return NewError(ValidationError, message)
}

// ValidationErrf is the formatted variant of ValidationErr.
func ValidationErrf(format string, args ...interface{}) *ErrorWithCode {
	return NewErrorf(ValidationError, format, args...)
}
diff --git a/push-validator-manager/internal/files/configstore.go b/push-validator-manager/internal/files/configstore.go
new file mode 100644
index 00000000..0b7ad071
--- /dev/null
+++ b/push-validator-manager/internal/files/configstore.go
@@ -0,0 +1,122 @@
+package files
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// ConfigStore abstracts config/app.toml and related files with idempotent writes.
+type ConfigStore interface {
+ EnableStateSync(params StateSyncParams) error
+ DisableStateSync() error
+ SetPersistentPeers(peers []string) error
+ Backup() (string, error) // returns backup path of config.toml
+}
+
+type StateSyncParams struct {
+ TrustHeight int64
+ TrustHash string
+ RPCServers []string // full URLs, comma separated when rendered
+ TrustPeriod string // e.g., 336h0m0s
+}
+
+type store struct { home string }
+
+// New returns a filesystem-backed store rooted at home.
+func New(home string) ConfigStore { return &store{home: home} }
+
+func (s *store) cfgPath() string { return filepath.Join(s.home, "config", "config.toml") }
+
+func (s *store) readConfig() (string, error) {
+ b, err := os.ReadFile(s.cfgPath())
+ if err != nil { return "", err }
+ return string(b), nil
+}
+
+func (s *store) writeConfig(content string) error {
+ return os.WriteFile(s.cfgPath(), []byte(content), 0o644)
+}
+
+func (s *store) Backup() (string, error) {
+ src := s.cfgPath()
+ ts := time.Now().Format("20060102-150405")
+ dst := src + "." + ts + ".bak"
+ b, err := os.ReadFile(src)
+ if err != nil { return "", err }
+ if err := os.WriteFile(dst, b, 0o644); err != nil { return "", err }
+ return dst, nil
+}
+
+func (s *store) EnableStateSync(params StateSyncParams) error {
+ content, err := s.readConfig()
+ if err != nil { return err }
+ // Ensure [statesync] section
+ if !regexp.MustCompile(`(?m)^\[statesync\]\s*$`).MatchString(content) {
+ content += "\n[statesync]\n"
+ }
+ content = setInSection(content, "statesync", map[string]string{
+ "enable": "true",
+ "rpc_servers": fmt.Sprintf("\"%s\"", strings.Join(params.RPCServers, ",")),
+ "trust_height": fmt.Sprintf("%d", params.TrustHeight),
+ "trust_hash": fmt.Sprintf("\"%s\"", params.TrustHash),
+ "trust_period": fmt.Sprintf("\"%s\"", valueOrDefault(params.TrustPeriod, "336h0m0s")),
+ })
+ return s.writeConfig(content)
+}
+
+func (s *store) DisableStateSync() error {
+ content, err := s.readConfig()
+ if err != nil { return err }
+ content = setInSection(content, "statesync", map[string]string{
+ "enable": "false",
+ })
+ return s.writeConfig(content)
+}
+
+func (s *store) SetPersistentPeers(peers []string) error {
+ content, err := s.readConfig()
+ if err != nil { return err }
+ if !regexp.MustCompile(`(?m)^\[p2p\]\s*$`).MatchString(content) {
+ content += "\n[p2p]\n"
+ }
+ content = setInSection(content, "p2p", map[string]string{
+ "persistent_peers": fmt.Sprintf("\"%s\"", strings.Join(peers, ",")),
+ "addr_book_strict": "false",
+ })
+ return s.writeConfig(content)
+}
+
// setInSection rewrites key = value entries inside one TOML-style [section]
// of content. Existing key lines are replaced in place; missing keys are
// appended to the end of the section body. If the section header does not
// exist the content is returned unchanged — callers are expected to create
// the header first.
func setInSection(content, section string, kv map[string]string) string {
	headerRe := regexp.MustCompile(`(?m)^\[` + regexp.QuoteMeta(section) + `\]\s*$`)
	headerLoc := headerRe.FindStringIndex(content)
	if headerLoc == nil {
		return content
	}
	bodyStart := headerLoc[1]

	// The section body runs until the next [anything] header, or EOF.
	bodyEnd := len(content)
	anySectionRe := regexp.MustCompile(`(?m)^\[[^]]+\]\s*$`)
	if nextLoc := anySectionRe.FindStringIndex(content[bodyStart:]); nextLoc != nil {
		bodyEnd = bodyStart + nextLoc[0]
	}
	body := content[bodyStart:bodyEnd]

	// Apply each key: replace when present, append otherwise.
	// (Map iteration order is random, so appended keys land in no fixed order.)
	for key, val := range kv {
		keyRe := regexp.MustCompile(`(?m)^\s*` + regexp.QuoteMeta(key) + `\s*=\s*.*$`)
		entry := key + " = " + val
		if keyRe.MatchString(body) {
			body = keyRe.ReplaceAllString(body, entry)
			continue
		}
		if len(strings.TrimSpace(body)) > 0 && !strings.HasSuffix(body, "\n") {
			body += "\n"
		}
		body += entry + "\n"
	}
	return content[:bodyStart] + body + content[bodyEnd:]
}
+
// valueOrDefault returns s unless it is empty, in which case d is returned.
func valueOrDefault(s, d string) string {
	if s == "" {
		return d
	}
	return s
}
+
diff --git a/push-validator-manager/internal/files/configstore_test.go b/push-validator-manager/internal/files/configstore_test.go
new file mode 100644
index 00000000..d511effe
--- /dev/null
+++ b/push-validator-manager/internal/files/configstore_test.go
@@ -0,0 +1,51 @@
+package files
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestConfigStore_StateSyncAndPeers(t *testing.T) {
+ dir := t.TempDir()
+ // Seed minimal config.toml without sections
+ cfgDir := filepath.Join(dir, "config")
+ if err := os.MkdirAll(cfgDir, 0o755); err != nil { t.Fatal(err) }
+ cfgPath := filepath.Join(cfgDir, "config.toml")
+ if err := os.WriteFile(cfgPath, []byte(""), 0o644); err != nil { t.Fatal(err) }
+
+ s := New(dir).(*store)
+
+ // Set peers
+ peers := []string{"id1@host1:26656", "id2@host2:26656"}
+ if err := s.SetPersistentPeers(peers); err != nil { t.Fatal(err) }
+ b, _ := os.ReadFile(cfgPath)
+ got := string(b)
+ if !strings.Contains(got, "[p2p]") { t.Fatalf("missing [p2p] section: %s", got) }
+ if !strings.Contains(got, "persistent_peers = \"id1@host1:26656,id2@host2:26656\"") {
+ t.Fatalf("peers not set correctly: %s", got)
+ }
+ if !strings.Contains(got, "addr_book_strict = false") { t.Fatalf("addr_book_strict not set: %s", got) }
+
+ // Enable state sync
+ if err := s.EnableStateSync(StateSyncParams{TrustHeight: 1234, TrustHash: "ABCDEF", RPCServers: []string{"https://a:443","https://b:443"}}); err != nil { t.Fatal(err) }
+ b, _ = os.ReadFile(cfgPath)
+ got = string(b)
+ if !strings.Contains(got, "[statesync]") { t.Fatalf("missing [statesync] section: %s", got) }
+ if !strings.Contains(got, "enable = true") { t.Fatalf("statesync enable missing: %s", got) }
+ if !strings.Contains(got, "trust_height = 1234") { t.Fatalf("trust_height missing: %s", got) }
+ if !strings.Contains(got, "trust_hash = \"ABCDEF\"") { t.Fatalf("trust_hash missing: %s", got) }
+ if !strings.Contains(got, "rpc_servers = \"https://a:443,https://b:443\"") { t.Fatalf("rpc_servers missing: %s", got) }
+
+ // Disable state sync
+ if err := s.DisableStateSync(); err != nil { t.Fatal(err) }
+ b, _ = os.ReadFile(cfgPath)
+ if !strings.Contains(string(b), "enable = false") { t.Fatalf("disable did not set enable=false: %s", string(b)) }
+
+ // Backup
+ p, err := s.Backup()
+ if err != nil { t.Fatal(err) }
+ if _, err := os.Stat(p); err != nil { t.Fatalf("backup not created: %v", err) }
+}
+
diff --git a/push-validator-manager/internal/metrics/collector.go b/push-validator-manager/internal/metrics/collector.go
new file mode 100644
index 00000000..ed45bde2
--- /dev/null
+++ b/push-validator-manager/internal/metrics/collector.go
@@ -0,0 +1,180 @@
+package metrics
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+ "github.com/shirou/gopsutil/v3/cpu"
+ "github.com/shirou/gopsutil/v3/disk"
+ "github.com/shirou/gopsutil/v3/mem"
+)
+
// System holds host-level resource usage sampled via gopsutil.
type System struct {
	CPUPercent float64 // most recent whole-host CPU percentage (from background sampler)
	MemUsed    uint64  // bytes
	MemTotal   uint64  // bytes
	DiskUsed   uint64  // bytes, root filesystem
	DiskTotal  uint64  // bytes, root filesystem
}

// Network describes connectivity as observed from the local node.
type Network struct {
	Peers     int   // connected p2p peers (from /net_info)
	LatencyMS int64 // round-trip time of one remote /status call, in ms
}

// Chain captures local vs remote chain head state.
type Chain struct {
	LocalHeight  int64
	RemoteHeight int64
	CatchingUp   bool
}

// Node identifies the local node and whether its RPC answered.
type Node struct {
	ChainID      string
	NodeID       string
	Moniker      string
	RPCListening bool
}

// Snapshot is one point-in-time collection of all metric groups.
type Snapshot struct {
	System  System
	Network Network
	Chain   Chain
	Node    Node
}

// Collector samples system/chain metrics. CPU usage is gathered by a
// background goroutine (see updateCPU) because cpu.Percent needs a sampling
// window; Collect only reads the cached value under mu.
type Collector struct {
	mu         sync.RWMutex
	lastCPU    float64
	cpuRunning bool
	cpuDone    chan struct{} // Signal to stop CPU collection
}
+
+// New creates a Collector with background CPU monitoring started immediately
+// Use this for long-running processes like the dashboard
+func New() *Collector {
+ c := &Collector{
+ cpuDone: make(chan struct{}),
+ }
+ // Start background CPU collection immediately
+ go c.updateCPU()
+ return c
+}
+
+// NewWithoutCPU creates a Collector without starting CPU monitoring
+// Use this for short-lived commands like status that don't need continuous CPU tracking
+func NewWithoutCPU() *Collector {
+ return &Collector{
+ cpuDone: make(chan struct{}),
+ }
+}
+
+// Start begins background CPU collection (safe to call on any collector)
+func (c *Collector) Start() {
+ c.mu.Lock()
+ if !c.cpuRunning {
+ c.cpuRunning = true
+ c.mu.Unlock()
+ go c.updateCPU()
+ } else {
+ c.mu.Unlock()
+ }
+}
+
+// Stop halts background CPU collection
+func (c *Collector) Stop() {
+ c.mu.Lock()
+ if c.cpuRunning {
+ c.cpuRunning = false
+ c.mu.Unlock()
+ select {
+ case c.cpuDone <- struct{}{}:
+ default:
+ }
+ } else {
+ c.mu.Unlock()
+ }
+}
+
// updateCPU runs in background to continuously update CPU metrics.
//
// cpu.Percent(time.Second, false) blocks for its one-second sampling window,
// so each loop iteration takes roughly 1.1s and the cpuDone signal is only
// observed between samples, never instantly.
func (c *Collector) updateCPU() {
	for {
		select {
		case <-c.cpuDone:
			// Stop signal received
			c.mu.Lock()
			c.cpuRunning = false
			c.mu.Unlock()
			return
		default:
			// false = aggregate over all cores into a single percentage.
			if percent, err := cpu.Percent(time.Second, false); err == nil && len(percent) > 0 {
				c.mu.Lock()
				c.lastCPU = percent[0]
				c.mu.Unlock()
			}
			// Small sleep to prevent tight loop
			time.Sleep(100 * time.Millisecond)
		}
	}
}
+
// Collect queries local and remote RPCs to produce minimal metrics without external deps.
// Every probe is best-effort: on failure the corresponding Snapshot fields
// simply keep their zero values.
func (c *Collector) Collect(ctx context.Context, localRPC, remoteRPC string) Snapshot {
	snap := Snapshot{}
	local := node.New(localRPC)

	// Construct proper HTTP URL from genesis domain if it's just a hostname
	remoteURL := remoteRPC
	if !strings.HasPrefix(remoteRPC, "http://") && !strings.HasPrefix(remoteRPC, "https://") {
		// Default to HTTPS for remote endpoints
		remoteURL = fmt.Sprintf("https://%s:443", remoteRPC)
	}
	remote := node.New(remoteURL)

	// Local status
	if st, err := local.Status(ctx); err == nil {
		snap.Chain.LocalHeight = st.Height
		snap.Chain.CatchingUp = st.CatchingUp
		snap.Node.ChainID = st.Network
		snap.Node.NodeID = st.NodeID
		snap.Node.Moniker = st.Moniker
		snap.Node.RPCListening = true // If we got a response, RPC is listening
	}
	// Remote status
	if st, err := remote.RemoteStatus(ctx, remoteURL); err == nil {
		snap.Chain.RemoteHeight = st.Height
	}
	// Peers count (best-effort)
	if peers, err := local.Peers(ctx); err == nil {
		snap.Network.Peers = len(peers)
	}
	// Latency: time a single remote /status call
	// Note this is a second, separate round-trip beyond the height query above.
	t0 := time.Now()
	if _, err := remote.RemoteStatus(ctx, remoteURL); err == nil {
		snap.Network.LatencyMS = time.Since(t0).Milliseconds()
	}

	// System metrics
	// CPU usage - return cached value from background collection
	c.mu.RLock()
	snap.System.CPUPercent = c.lastCPU
	c.mu.RUnlock()

	// Memory usage
	if vmStat, err := mem.VirtualMemory(); err == nil {
		snap.System.MemUsed = vmStat.Used
		snap.System.MemTotal = vmStat.Total
	}

	// Disk usage - get usage for root filesystem
	if diskStat, err := disk.Usage("/"); err == nil {
		snap.System.DiskUsed = diskStat.Used
		snap.System.DiskTotal = diskStat.Total
	}

	return snap
}
+
diff --git a/push-validator-manager/internal/node/client.go b/push-validator-manager/internal/node/client.go
new file mode 100644
index 00000000..16e8ad18
--- /dev/null
+++ b/push-validator-manager/internal/node/client.go
@@ -0,0 +1,148 @@
+package node
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
// Client defines the RPC/WS client surface area we depend on.
type Client interface {
	// Status queries the local node's /status endpoint.
	Status(ctx context.Context) (Status, error)
	// RemoteStatus queries /status on an arbitrary base URL.
	RemoteStatus(ctx context.Context, baseURL string) (Status, error)
	// BlockHash returns the upper-cased block-ID hash at height (<=0 = latest).
	BlockHash(ctx context.Context, height int64) (string, error)
	// Peers lists currently connected p2p peers from /net_info.
	Peers(ctx context.Context) ([]Peer, error)
	// SubscribeHeaders streams new block headers over websocket.
	SubscribeHeaders(ctx context.Context) (<-chan Header, error)
}

// Status is the subset of /status we consume.
type Status struct {
	NodeID     string
	Moniker    string
	Network    string // chain-id
	CatchingUp bool
	Height     int64
}

// Peer identifies one connected p2p peer.
type Peer struct {
	ID   string
	Addr string // host:port
}

// Header is a minimal view of a block header event.
type Header struct {
	Height int64
	Time   time.Time
}

// httpClient talks plain JSON-over-HTTP to a CometBFT-style RPC endpoint.
type httpClient struct {
	http  *http.Client
	base  string // e.g. http://127.0.0.1:26657
	wsURL string // e.g. ws://127.0.0.1:26657/websocket
}

// New constructs a JSON-RPC client with sane timeouts. The websocket URL is
// derived from base (http -> ws, https -> wss, path /websocket).
func New(base string) Client {
	base = strings.TrimRight(base, "/")
	ws := deriveWS(base)
	return &httpClient{
		http:  &http.Client{Timeout: 2500 * time.Millisecond},
		base:  base,
		wsURL: ws,
	}
}
+
// deriveWS maps an HTTP(S) base URL to its websocket endpoint:
//
//	http://host:port  -> ws://host:port/websocket
//	https://host:port -> wss://host:port/websocket
//	anything else     -> ws://<input>/websocket
func deriveWS(base string) string {
	switch {
	case strings.HasPrefix(base, "http://"):
		return "ws://" + strings.TrimPrefix(base, "http://") + "/websocket"
	case strings.HasPrefix(base, "https://"):
		return "wss://" + strings.TrimPrefix(base, "https://") + "/websocket"
	default:
		return "ws://" + base + "/websocket"
	}
}
+
// Status queries the local node's /status endpoint (identity + sync state)
// by delegating to RemoteStatus on the configured base URL.
func (c *httpClient) Status(ctx context.Context) (Status, error) {
	return c.RemoteStatus(ctx, c.base)
}
+
+func (c *httpClient) RemoteStatus(ctx context.Context, baseURL string) (Status, error) {
+ baseURL = strings.TrimRight(baseURL, "/")
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/status", nil)
+ resp, err := c.http.Do(req)
+ if err != nil { return Status{}, err }
+ defer resp.Body.Close()
+ var payload struct {
+ Result struct {
+ NodeInfo struct{
+ ID string `json:"id"`
+ Moniker string `json:"moniker"`
+ Network string `json:"network"`
+ } `json:"node_info"`
+ SyncInfo struct{
+ CatchingUp bool `json:"catching_up"`
+ Height string `json:"latest_block_height"`
+ } `json:"sync_info"`
+ } `json:"result"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return Status{}, err }
+ h, _ := strconv.ParseInt(payload.Result.SyncInfo.Height, 10, 64)
+ return Status{
+ NodeID: payload.Result.NodeInfo.ID,
+ Moniker: payload.Result.NodeInfo.Moniker,
+ Network: payload.Result.NodeInfo.Network,
+ CatchingUp: payload.Result.SyncInfo.CatchingUp,
+ Height: h,
+ }, nil
+}
+
+func (c *httpClient) BlockHash(ctx context.Context, height int64) (string, error) {
+ u := c.base + "/block"
+ if height > 0 {
+ q := url.Values{}
+ q.Set("height", strconv.FormatInt(height, 10))
+ u += "?" + q.Encode()
+ }
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
+ resp, err := c.http.Do(req)
+ if err != nil { return "", err }
+ defer resp.Body.Close()
+ var payload struct{ Result struct{ BlockID struct{ Hash string `json:"hash"` } `json:"block_id"` } `json:"result"` }
+ if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return "", err }
+ return strings.ToUpper(payload.Result.BlockID.Hash), nil
+}
+
+func (c *httpClient) Peers(ctx context.Context) ([]Peer, error) {
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, c.base+"/net_info", nil)
+ resp, err := c.http.Do(req)
+ if err != nil { return nil, err }
+ defer resp.Body.Close()
+ var payload struct {
+ Result struct {
+ Peers []struct {
+ NodeInfo struct {
+ ID string `json:"id"`
+ ListenAddr string `json:"listen_addr"`
+ } `json:"node_info"`
+ RemoteIP string `json:"remote_ip"`
+ } `json:"peers"`
+ } `json:"result"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil { return nil, err }
+ out := make([]Peer, 0, len(payload.Result.Peers))
+ for _, p := range payload.Result.Peers {
+ if p.NodeInfo.ID == "" || p.RemoteIP == "" { continue }
+ out = append(out, Peer{ID: p.NodeInfo.ID, Addr: fmt.Sprintf("%s:26656", p.RemoteIP)})
+ }
+ return out, nil
+}
+
// SubscribeHeaders opens a websocket to the node and streams new block
// headers; see DialAndSubscribeHeaders for channel lifecycle details.
func (c *httpClient) SubscribeHeaders(ctx context.Context) (<-chan Header, error) {
	return DialAndSubscribeHeaders(ctx, c.wsURL)
}
diff --git a/push-validator-manager/internal/node/ws.go b/push-validator-manager/internal/node/ws.go
new file mode 100644
index 00000000..ae223b53
--- /dev/null
+++ b/push-validator-manager/internal/node/ws.go
@@ -0,0 +1,107 @@
+package node
+
import (
	"context"
	"encoding/json"
	"fmt"
	"net/url"
	"strconv"
	"time"

	"github.com/gorilla/websocket"
)
+
+// DialAndSubscribeHeaders uses gorilla/websocket to subscribe to NewBlockHeader events and stream heights.
+func DialAndSubscribeHeaders(ctx context.Context, wsURL string) (<-chan Header, error) {
+ u, err := url.Parse(wsURL)
+ if err != nil { return nil, err }
+ if u.Path == "" { u.Path = "/websocket" }
+
+ d := websocket.Dialer{
+ Subprotocols: []string{"jsonrpc"},
+ HandshakeTimeout: 5 * time.Second,
+ EnableCompression: false,
+ }
+ // nolint:bodyclose
+ conn, _, err := d.DialContext(ctx, u.String(), map[string][]string{"Origin": {"http://localhost"}})
+ if err != nil { return nil, err }
+
+ // Send subscribe request
+ sub := map[string]any{
+ "jsonrpc": "2.0",
+ "method": "subscribe",
+ // Prefer cometbft.event key for 0.38+; servers typically support both.
+ "params": map[string]string{"query": "cometbft.event='NewBlockHeader'"},
+ "id": 1,
+ }
+ if err := conn.WriteJSON(sub); err != nil { _ = conn.Close(); return nil, err }
+
+ out := make(chan Header, 32)
+ go func() {
+ defer close(out)
+ defer func() {
+ // attempt proper close handshake
+ deadline := time.Now().Add(1500 * time.Millisecond)
+ _ = conn.SetWriteDeadline(deadline)
+ _ = conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), deadline)
+ // best-effort wait for server close
+ _ = conn.SetReadDeadline(deadline)
+ _, _, _ = conn.ReadMessage()
+ _ = conn.Close()
+ }()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ // Read next message
+ // Read next message (gorilla handles ping/pong)
+ _, msg, err := conn.ReadMessage()
+ if err != nil {
+ // graceful exits on normal closure or going away
+ if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
+ return
+ }
+ // any other error: return and let defer handle close
+ return
+ }
+ if h, ok := parseHeaderHeight(msg); ok { out <- h; continue }
+ // handle pong/ping implicitly via gorilla; ignore others
+ }
+ }()
+ return out, nil
+}
+
+func parseHeaderHeight(b []byte) (Header, bool) {
+ var payload struct {
+ Result struct {
+ Data struct {
+ Value struct {
+ Header struct {
+ Height string `json:"height"`
+ Time time.Time `json:"time"`
+ } `json:"header"`
+ } `json:"value"`
+ } `json:"data"`
+ } `json:"result"`
+ }
+ if err := json.Unmarshal(b, &payload); err != nil { return Header{}, false }
+ if payload.Result.Data.Value.Header.Height == "" { return Header{}, false }
+ // Accept both tm.event and cometbft.event streams; height parse only
+ h, err := strconvParseInt(payload.Result.Data.Value.Header.Height)
+ if err != nil { return Header{}, false }
+ return Header{Height: h, Time: payload.Result.Data.Value.Header.Time}, true
+}
+
// strconvParseInt parses a base-10 signed 64-bit integer.
//
// Fix: the previous hand-rolled digit loop silently accepted a bare "-" as 0
// and performed no 64-bit overflow checking. Delegating to strconv.ParseInt
// fixes both while keeping the same (int64, error) contract; the error is
// wrapped so callers still just test err != nil.
func strconvParseInt(s string) (int64, error) {
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid integer %q: %w", s, err)
	}
	return n, nil
}
diff --git a/push-validator-manager/internal/node/ws_test.go b/push-validator-manager/internal/node/ws_test.go
new file mode 100644
index 00000000..e3d48842
--- /dev/null
+++ b/push-validator-manager/internal/node/ws_test.go
@@ -0,0 +1,144 @@
+package node
+
+import (
+ "bufio"
+ "context"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "testing"
+ "time"
+)
+
+// minimal server-side frame writer (no masking)
+func wsWriteFrame(w *bufio.Writer, opcode byte, payload []byte) error {
+ header := []byte{0x80 | (opcode & 0x0F)} // FIN + opcode
+ n := len(payload)
+ switch {
+ case n <= 125:
+ header = append(header, byte(n))
+ case n <= 65535:
+ header = append(header, 126, byte(n>>8), byte(n))
+ default:
+ header = append(header, 127, 0, 0, 0, 0, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+ }
+ if _, err := w.Write(header); err != nil { return err }
+ if _, err := w.Write(payload); err != nil { return err }
+ return w.Flush()
+}
+
+// server-side reader for client frames (masked), returns opcode and unmasked payload
+func wsReadFrame(r *bufio.Reader) (byte, []byte, error) {
+ b1, err := r.ReadByte()
+ if err != nil { return 0, nil, err }
+ b2, err := r.ReadByte()
+ if err != nil { return 0, nil, err }
+ opcode := b1 & 0x0F
+ masked := (b2 & 0x80) != 0
+ ln := int(b2 & 0x7F)
+ if ln == 126 {
+ b, err := readNTest(r, 2)
+ if err != nil { return 0, nil, err }
+ ln = int(b[0])<<8 | int(b[1])
+ } else if ln == 127 {
+ b, err := readNTest(r, 8)
+ if err != nil { return 0, nil, err }
+ ln = int(b[4])<<24 | int(b[5])<<16 | int(b[6])<<8 | int(b[7])
+ }
+ var maskKey [4]byte
+ if masked {
+ mk, err := readNTest(r, 4)
+ if err != nil { return 0, nil, err }
+ copy(maskKey[:], mk)
+ }
+ payload, err := readNTest(r, ln)
+ if err != nil { return 0, nil, err }
+ if masked {
+ for i := 0; i < ln; i++ { payload[i] ^= maskKey[i%4] }
+ }
+ return opcode, payload, nil
+}
+
+func readNTest(r *bufio.Reader, n int) ([]byte, error) {
+ buf := make([]byte, n)
+ _, err := io.ReadFull(r, buf)
+ return buf, err
+}
+
// TestDialAndSubscribeHeaders_JSONRPCSubprotocol spins up a hand-rolled
// websocket server on a loopback socket, validates that the client requests
// the "jsonrpc" subprotocol during the upgrade handshake, then feeds it one
// NewBlockHeader event and asserts the height arrives on the channel.
func TestDialAndSubscribeHeaders_JSONRPCSubprotocol(t *testing.T) {
	// Some sandboxes restrict binding; detect and skip.
	probe, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil { t.Skipf("skipping: cannot bind due to sandbox: %v", err) }
	probe.Close()

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil { t.Skipf("skipping: cannot listen: %v", err) }
	defer ln.Close()

	// The fake server reports its outcome (nil = handshake + frames OK).
	srvErr := make(chan error, 1)
	go func() {
		c, err := ln.Accept()
		if err != nil { srvErr <- err; return }
		defer c.Close()
		br := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c))
		// Read HTTP request
		statusLine, err := br.Reader.ReadString('\n')
		if err != nil { srvErr <- err; return }
		if !strings.HasPrefix(statusLine, "GET ") { srvErr <- fmt.Errorf("bad request line: %s", statusLine); return }
		headers := map[string]string{}
		for {
			line, err := br.Reader.ReadString('\n')
			if err != nil { srvErr <- err; return }
			line = strings.TrimRight(line, "\r\n")
			if line == "" { break }
			if i := strings.Index(line, ":"); i > 0 {
				k := strings.ToLower(strings.TrimSpace(line[:i]))
				v := strings.TrimSpace(line[i+1:])
				headers[k] = v
			}
		}
		// Validate JSONRPC subprotocol requested
		if sp := headers["sec-websocket-protocol"]; !strings.Contains(strings.ToLower(sp), "jsonrpc") {
			srvErr <- fmt.Errorf("missing jsonrpc subprotocol: %q", sp); return
		}
		// Compute Sec-WebSocket-Accept per RFC 6455 (SHA-1 of key + magic GUID).
		key := headers["sec-websocket-key"]
		if key == "" { srvErr <- fmt.Errorf("missing sec-websocket-key") ; return }
		acc := sha1.Sum([]byte(key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"))
		accept := base64.StdEncoding.EncodeToString(acc[:])
		// Respond 101 with subprotocol
		resp := fmt.Sprintf("HTTP/1.1 101 Switching Protocols\r\n"+
			"Upgrade: websocket\r\n"+
			"Connection: Upgrade\r\n"+
			"Sec-WebSocket-Accept: %s\r\n"+
			"Sec-WebSocket-Protocol: jsonrpc\r\n\r\n", accept)
		if _, err := br.Writer.WriteString(resp); err != nil { srvErr <- err; return }
		if err := br.Writer.Flush(); err != nil { srvErr <- err; return }
		// Read client's subscribe frame
		if _, _, err := wsReadFrame(br.Reader); err != nil { srvErr <- fmt.Errorf("read subscribe: %w", err); return }
		// Write a header event frame
		payload := []byte(`{"jsonrpc":"2.0","result":{"data":{"value":{"header":{"height":"42","time":"2024-01-01T00:00:00Z"}}}}}`)
		if err := wsWriteFrame(br.Writer, 0x1, payload); err != nil { srvErr <- err; return }
		// Give client time to read
		time.Sleep(100 * time.Millisecond)
		// Close (opcode 0x8 = close frame)
		_ = wsWriteFrame(br.Writer, 0x8, []byte{})
		srvErr <- nil
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	wsURL := fmt.Sprintf("ws://%s/websocket", ln.Addr().String())
	ch, err := DialAndSubscribeHeaders(ctx, wsURL)
	if err != nil { t.Fatalf("dial/subscribe error: %v", err) }
	select {
	case h, ok := <-ch:
		if !ok { t.Fatal("channel closed") }
		if h.Height != 42 { t.Fatalf("got height %d want 42", h.Height) }
	case <-time.After(2 * time.Second):
		t.Fatal("timeout waiting for header")
	}
	if e := <-srvErr; e != nil { t.Fatalf("server error: %v", e) }
}
diff --git a/push-validator-manager/internal/process/supervisor.go b/push-validator-manager/internal/process/supervisor.go
new file mode 100644
index 00000000..404e66be
--- /dev/null
+++ b/push-validator-manager/internal/process/supervisor.go
@@ -0,0 +1,263 @@
+package process
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+)
+
+// Supervisor controls the pchaind process: start/stop/restart and status.
+// Implementation handles detached exec, PID files, and log paths.
+type Supervisor interface {
+ // Start launches the daemon (idempotent: returns the existing PID when already running).
+ Start(opts StartOpts) (int, error) // returns PID
+ // Stop terminates the daemon: SIGTERM first, SIGKILL after a 15s grace period.
+ Stop() error
+ // Restart is Stop followed by Start with the given options.
+ Restart(opts StartOpts) (int, error)
+ // IsRunning reports whether the PID-file process is currently alive.
+ IsRunning() bool
+ // PID returns the recorded process id and whether that process is alive.
+ PID() (int, bool)
+ Uptime() (time.Duration, bool) // returns uptime duration and whether process is running
+ LogPath() string // filesystem path of the daemon's log file
+}
+
+// StartOpts captures settings for launching the daemon.
+type StartOpts struct {
+ HomeDir string // node home directory (required)
+ Moniker string // NOTE(review): not referenced by Start in this file — confirm where it is consumed
+ BinPath string // path to pchaind (defaults to "pchaind" if empty)
+ ExtraArgs []string // additional args to append after defaults
+}
+
+// supervisor is the default Supervisor backed by a PID file and a log file
+// under the node home directory. mu serializes Start/Stop/Restart transitions.
+type supervisor struct {
+ pidFile string // <home>/pchaind.pid
+ logFile string // <home>/logs/pchaind.log
+ mu sync.Mutex
+}
+
+// New constructs a Supervisor rooted at the given home directory. The PID is
+// recorded in <home>/pchaind.pid and daemon output in <home>/logs/pchaind.log.
+func New(home string) Supervisor {
+    s := &supervisor{}
+    s.pidFile = filepath.Join(home, "pchaind.pid")
+    s.logFile = filepath.Join(home, "logs", "pchaind.log")
+    return s
+}
+
+// LogPath reports the path of the daemon's log file.
+func (s *supervisor) LogPath() string { return s.logFile }
+
+// PID reads the PID file and reports the recorded process id together with
+// whether that process is currently alive. A missing, empty, or malformed PID
+// file — or a dead process — yields (0, false).
+func (s *supervisor) PID() (int, bool) {
+    raw, err := os.ReadFile(s.pidFile)
+    if err != nil {
+        return 0, false
+    }
+    pid, convErr := strconv.Atoi(strings.TrimSpace(string(raw)))
+    if convErr != nil {
+        return 0, false
+    }
+    if !processAlive(pid) {
+        return 0, false
+    }
+    return pid, true
+}
+
+// IsRunning reports whether the process recorded in the PID file is alive.
+func (s *supervisor) IsRunning() bool {
+    if _, alive := s.PID(); alive {
+        return true
+    }
+    return false
+}
+
+// Uptime reports how long the supervised process has been running and whether
+// it is running at all (false when there is no live PID or ps fails).
+//
+// NOTE(review): relies on `ps -o etimes=` printing elapsed seconds; this is
+// standard on Linux procps, but availability on macOS/BSD ps should be
+// confirmed before depending on Uptime there.
+func (s *supervisor) Uptime() (time.Duration, bool) {
+ pid, ok := s.PID()
+ if !ok {
+ return 0, false
+ }
+
+ // Use ps to get elapsed time in seconds (works on Linux and macOS)
+ cmd := exec.Command("ps", "-p", strconv.Itoa(pid), "-o", "etimes=")
+ out, err := cmd.Output()
+ if err != nil {
+ return 0, false
+ }
+
+ // Parse elapsed seconds
+ elapsed := strings.TrimSpace(string(out))
+ seconds, err := strconv.ParseInt(elapsed, 10, 64)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(seconds) * time.Second, true
+}
+
+// Stop gracefully terminates the supervised process: SIGTERM first, then
+// SIGKILL if it has not exited within 15 seconds. The PID file is removed
+// once the process is gone. Stopping when nothing is running is a no-op.
+func (s *supervisor) Stop() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ pid, ok := s.PID()
+ if !ok { return nil }
+ // Try graceful TERM
+ _ = syscall.Kill(pid, syscall.SIGTERM)
+ // Wait up to 15 seconds, polling every 300ms
+ deadline := time.Now().Add(15 * time.Second)
+ for time.Now().Before(deadline) {
+ if !processAlive(pid) {
+ _ = os.Remove(s.pidFile)
+ return nil
+ }
+ time.Sleep(300 * time.Millisecond)
+ }
+ // Force kill, then give the kernel a moment to reap before re-checking
+ _ = syscall.Kill(pid, syscall.SIGKILL)
+ time.Sleep(500 * time.Millisecond)
+ _ = os.Remove(s.pidFile)
+ if processAlive(pid) { return errors.New("failed to stop pchaind") }
+ return nil
+}
+
+// Restart stops any running pchaind instance, then starts a fresh one with opts.
+func (s *supervisor) Restart(opts StartOpts) (int, error) {
+    if err := s.Stop(); err != nil {
+        return 0, err
+    }
+    return s.Start(opts)
+}
+
+// Start launches pchaind detached from the current session and records its PID.
+//
+// It is idempotent: if a live process is already recorded in the PID file its
+// PID is returned unchanged. Before launching it validates that genesis.json
+// exists, performs a one-time data reset when state sync is required, and
+// redirects stdout/stderr to the supervisor's log file. Returns the child PID.
+func (s *supervisor) Start(opts StartOpts) (int, error) {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+
+    if opts.HomeDir == "" {
+        return 0, errors.New("HomeDir required")
+    }
+    if pid, ok := s.PID(); ok {
+        return pid, nil
+    }
+
+    // Fail fast when the node was never initialized.
+    genesisPath := filepath.Join(opts.HomeDir, "config", "genesis.json")
+    if _, err := os.Stat(genesisPath); os.IsNotExist(err) {
+        return 0, fmt.Errorf("genesis.json not found at %s. Please run 'init' first", genesisPath)
+    }
+
+    // Resolve the binary once (the original computed this twice).
+    bin := opts.BinPath
+    if bin == "" {
+        bin = "pchaind"
+    }
+
+    s.stateSyncResetIfNeeded(opts.HomeDir, bin)
+
+    if err := os.MkdirAll(filepath.Join(opts.HomeDir, "logs"), 0o755); err != nil {
+        return 0, err
+    }
+    return s.launch(bin, opts)
+}
+
+// stateSyncResetIfNeeded clears block data right before a state-synced start.
+// A reset is needed on a fresh node (no blockstore.db) or when the marker file
+// .initial_state_sync explicitly requests it. All failures are best-effort:
+// the node may still start and sync without the reset.
+func (s *supervisor) stateSyncResetIfNeeded(home, bin string) {
+    marker := filepath.Join(home, ".initial_state_sync")
+    blockstore := filepath.Join(home, "data", "blockstore.db")
+
+    needed := false
+    if _, err := os.Stat(marker); err == nil {
+        needed = true
+    } else if _, err := os.Stat(blockstore); os.IsNotExist(err) {
+        needed = true
+    }
+    if !needed {
+        return
+    }
+
+    // Run tendermint unsafe-reset-all to clear data for state sync.
+    // Non-fatal: continue anyway as the node might work.
+    cmd := exec.Command(bin, "tendermint", "unsafe-reset-all", "--home", home, "--keep-addr-book")
+    _ = cmd.Run()
+
+    // Ensure priv_validator_state.json exists after the reset.
+    pvsPath := filepath.Join(home, "data", "priv_validator_state.json")
+    if _, err := os.Stat(pvsPath); os.IsNotExist(err) {
+        _ = os.MkdirAll(filepath.Join(home, "data"), 0o755)
+        _ = os.WriteFile(pvsPath, []byte(`{"height":"0","round":0,"step":0}`), 0o644)
+    }
+
+    // Consume the one-shot marker.
+    _ = os.Remove(marker)
+}
+
+// launch execs pchaind in its own session, wires its output to the log file,
+// and persists the PID file. If the PID file cannot be written the freshly
+// started process is terminated best-effort so no untracked daemon is left.
+func (s *supervisor) launch(bin string, opts StartOpts) (int, error) {
+    // Build args: pchaind start --home <home> [extra...]
+    args := []string{"start", "--home", opts.HomeDir}
+    args = append(args, opts.ExtraArgs...)
+
+    // Open/append the log file.
+    lf, err := os.OpenFile(s.logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
+    if err != nil {
+        return 0, err
+    }
+
+    cmd := exec.Command(bin, args...)
+    cmd.Stdout = lf
+    cmd.Stderr = lf
+    cmd.Stdin = nil
+    // Detach from this session/process group so the daemon outlives us.
+    cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
+
+    if err := cmd.Start(); err != nil {
+        _ = lf.Close()
+        return 0, fmt.Errorf("start pchaind: %w", err)
+    }
+    pid := cmd.Process.Pid
+    if err := os.WriteFile(s.pidFile, []byte(strconv.Itoa(pid)), 0o644); err != nil {
+        // Best effort stop if we can't persist the PID.
+        _ = syscall.Kill(pid, syscall.SIGTERM)
+        _ = lf.Close()
+        return 0, err
+    }
+    // We do not wait on the child; the child holds its own copy of the log fd,
+    // so keep ours open briefly (to not lose very early bytes) and then close.
+    go func(f *os.File) {
+        time.Sleep(500 * time.Millisecond)
+        _ = f.Sync()
+        _ = f.Close()
+    }(lf)
+    return pid, nil
+}
+
+// processAlive reports whether a process with the given PID exists.
+// Signal 0 performs an existence check without delivering a signal.
+// EPERM means the process exists but belongs to another user, so it still
+// counts as alive; any other error (notably ESRCH) means the process is gone.
+func processAlive(pid int) bool {
+    if pid <= 0 {
+        return false
+    }
+    err := syscall.Kill(pid, 0)
+    if err == nil {
+        return true
+    }
+    return errors.Is(err, syscall.EPERM)
+}
+
+// IsRPCListening reports whether a TCP connection to hostport succeeds within
+// the given timeout. An empty hostport defaults to 127.0.0.1:26657.
+func IsRPCListening(hostport string, timeout time.Duration) bool {
+    target := hostport
+    if target == "" {
+        target = "127.0.0.1:26657"
+    }
+    dialer := net.Dialer{Timeout: timeout}
+    conn, err := dialer.Dial("tcp", target)
+    if err != nil {
+        return false
+    }
+    _ = conn.Close()
+    return true
+}
+
+// TailFollow streams content appended to the file at path into w until the
+// stop channel is closed. It starts at the current end of the file and polls
+// with a 500ms sleep on EOF — a deliberately naive follower that avoids
+// non-portable fs-notify dependencies.
+//
+// NOTE(review): log rotation/truncation is not handled; after rotation this
+// loop reads EOF forever on the old inode — confirm rotation cannot occur.
+func TailFollow(path string, w io.Writer, stop <-chan struct{}) error {
+ f, err := os.Open(path)
+ if err != nil { return err }
+ defer f.Close()
+
+ // Start at end of file
+ if _, err := f.Seek(0, io.SeekEnd); err != nil { return err }
+
+ buf := make([]byte, 4096)
+ for {
+ // Non-blocking stop check between reads
+ select {
+ case <-stop:
+ return nil
+ default:
+ }
+ n, err := f.Read(buf)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ time.Sleep(500 * time.Millisecond)
+ continue
+ }
+ return err
+ }
+ if n > 0 {
+ if _, werr := w.Write(buf[:n]); werr != nil { return werr }
+ }
+ }
+}
+
diff --git a/push-validator-manager/internal/process/supervisor_test.go b/push-validator-manager/internal/process/supervisor_test.go
new file mode 100644
index 00000000..9abf73d0
--- /dev/null
+++ b/push-validator-manager/internal/process/supervisor_test.go
@@ -0,0 +1,17 @@
+package process
+
+import (
+ "net"
+ "testing"
+ "time"
+)
+
+// TestIsRPCListening checks the TCP probe both while a listener is open and
+// after it has been closed.
+func TestIsRPCListening(t *testing.T) {
+    ln, err := net.Listen("tcp", "127.0.0.1:0")
+    if err != nil {
+        t.Skipf("skipping: cannot bind due to sandbox: %v", err)
+    }
+    defer ln.Close()
+
+    addr := ln.Addr().String()
+    if !IsRPCListening(addr, 200*time.Millisecond) {
+        t.Fatalf("expected listening true for %s", addr)
+    }
+
+    ln.Close()
+    if IsRPCListening(addr, 200*time.Millisecond) {
+        t.Fatalf("expected listening false after close for %s", addr)
+    }
+}
diff --git a/push-validator-manager/internal/statesync/provider.go b/push-validator-manager/internal/statesync/provider.go
new file mode 100644
index 00000000..cd8cbd98
--- /dev/null
+++ b/push-validator-manager/internal/statesync/provider.go
@@ -0,0 +1,127 @@
+package statesync
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Provider computes trust params from a remote snapshot-enabled RPC.
+type Provider interface {
+ // ComputeTrust derives a (height, hash) trust anchor for state sync from rpcURL.
+ ComputeTrust(ctx context.Context, rpcURL string) (TrustParams, error)
+}
+
+// TrustParams is a state-sync trust anchor: a block height and the uppercase
+// hex hash of the block at that height.
+type TrustParams struct {
+ Height int64
+ Hash string
+}
+
+// provider is the default Provider backed by a timeout-bounded HTTP client.
+type provider struct{ http *http.Client }
+
+// New returns a provider with sane timeouts.
+func New() Provider { return &provider{http: &http.Client{Timeout: 6 * time.Second}} }
+
+// ComputeTrust derives state-sync trust parameters (height + block hash) from
+// a snapshot-enabled RPC endpoint.
+//
+// Snapshots are taken at 1000-block intervals, so candidate trust heights are
+// aligned to that interval, walking 1 to 5 intervals back from the latest
+// height. The first candidate whose block hash the RPC can serve wins (older
+// heights may be pruned).
+func (p *provider) ComputeTrust(ctx context.Context, rpcURL string) (TrustParams, error) {
+    base := strings.TrimRight(rpcURL, "/")
+    // Get latest height with fallback: try /status first, then /block.
+    latestHeight, err := p.latestHeight(ctx, base)
+    if err != nil {
+        return TrustParams{}, err
+    }
+    if latestHeight < 2 {
+        latestHeight = 2
+    }
+
+    const snapshotInterval = int64(1000)
+
+    var lastErr error
+    tried := make(map[int64]bool, 5)
+    for intervals := int64(1); intervals <= 5; intervals++ {
+        // Snapshot-aligned candidate. E.g. latest=1136240, intervals=1:
+        // (1136240/1000 - 1) * 1000 = 1135000.
+        trustH := (latestHeight/snapshotInterval - intervals) * snapshotInterval
+        if trustH < snapshotInterval {
+            trustH = snapshotInterval
+        }
+        // Young chains may not have reached the first interval yet; never
+        // probe beyond the chain tip (the old clamp could exceed latestHeight).
+        if trustH > latestHeight {
+            trustH = latestHeight
+        }
+        // Clamping can produce duplicate candidates; probe each height once.
+        if tried[trustH] {
+            continue
+        }
+        tried[trustH] = true
+
+        hash, err := p.blockHash(ctx, base, trustH)
+        if err == nil && hash != "" {
+            return TrustParams{Height: trustH, Hash: strings.ToUpper(hash)}, nil
+        }
+        lastErr = err
+    }
+    if lastErr == nil {
+        lastErr = errors.New("could not determine trust hash from RPC")
+    }
+    return TrustParams{}, lastErr
+}
+
+// latestHeight reads the chain's latest block height, preferring /status and
+// falling back to /block (latest) when /status is unavailable.
+func (p *provider) latestHeight(ctx context.Context, base string) (int64, error) {
+    // Try /status
+    if h, err := p.latestFromStatus(ctx, base); err == nil && h > 0 {
+        return h, nil
+    }
+    // Fallback to /block
+    req, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/block", nil)
+    resp, err := p.doWithRetry(req)
+    if err != nil {
+        return 0, err
+    }
+    defer resp.Body.Close()
+    var latest struct {
+        Result struct {
+            Block struct {
+                Header struct {
+                    Height string `json:"height"`
+                } `json:"header"`
+            } `json:"block"`
+        } `json:"result"`
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&latest); err != nil {
+        return 0, err
+    }
+    // Surface parse failures instead of silently returning height 0 with a
+    // nil error (callers only check err).
+    h, err := strconv.ParseInt(latest.Result.Block.Header.Height, 10, 64)
+    if err != nil {
+        return 0, fmt.Errorf("parsing latest height %q: %w", latest.Result.Block.Header.Height, err)
+    }
+    return h, nil
+}
+
+// latestFromStatus queries <base>/status and returns sync_info.latest_block_height.
+func (p *provider) latestFromStatus(ctx context.Context, base string) (int64, error) {
+    req, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/status", nil)
+    resp, err := p.doWithRetry(req)
+    if err != nil {
+        return 0, err
+    }
+    defer resp.Body.Close()
+
+    var payload struct {
+        Result struct {
+            SyncInfo struct {
+                Height string `json:"latest_block_height"`
+            } `json:"sync_info"`
+        } `json:"result"`
+    }
+    if decErr := json.NewDecoder(resp.Body).Decode(&payload); decErr != nil {
+        return 0, decErr
+    }
+    raw := payload.Result.SyncInfo.Height
+    if raw == "" {
+        return 0, errors.New("empty height")
+    }
+    return strconv.ParseInt(raw, 10, 64)
+}
+
+// blockHash fetches the block hash at height, preferring /block?height= and
+// falling back to /commit?height= when /block fails or yields no hash.
+func (p *provider) blockHash(ctx context.Context, base string, height int64) (string, error) {
+    q := url.Values{"height": []string{strconv.FormatInt(height, 10)}}.Encode()
+
+    // Primary: /block?height=
+    blockReq, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/block?"+q, nil)
+    if resp, err := p.doWithRetry(blockReq); err == nil {
+        var blk struct {
+            Result struct {
+                BlockID struct {
+                    Hash string `json:"hash"`
+                } `json:"block_id"`
+            } `json:"result"`
+        }
+        decErr := json.NewDecoder(resp.Body).Decode(&blk)
+        resp.Body.Close()
+        if decErr == nil && blk.Result.BlockID.Hash != "" {
+            return blk.Result.BlockID.Hash, nil
+        }
+    }
+
+    // Fallback: /commit?height=
+    commitReq, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/commit?"+q, nil)
+    resp, err := p.doWithRetry(commitReq)
+    if err != nil {
+        return "", err
+    }
+    defer resp.Body.Close()
+    var cm struct {
+        Result struct {
+            SignedHeader struct {
+                Commit struct {
+                    BlockID struct {
+                        Hash string `json:"hash"`
+                    } `json:"block_id"`
+                } `json:"commit"`
+            } `json:"signed_header"`
+        } `json:"result"`
+    }
+    if decErr := json.NewDecoder(resp.Body).Decode(&cm); decErr != nil {
+        return "", decErr
+    }
+    return cm.Result.SignedHeader.Commit.BlockID.Hash, nil
+}
+
+// doWithRetry performs a request with up to 3 attempts and linear backoff
+// (100ms, 200ms) for transient failures. A non-200 response counts as a
+// failure and is recorded (the old code left lastErr nil in that case,
+// collapsing all diagnostics to "request failed"); its body is closed so the
+// connection can be reused. The request is only reused across attempts because
+// all callers issue body-less GETs.
+func (p *provider) doWithRetry(req *http.Request) (*http.Response, error) {
+    var lastErr error
+    for attempt := 0; attempt < 3; attempt++ {
+        if attempt > 0 {
+            // Back off before retrying — but never after the final attempt.
+            time.Sleep(time.Duration(100*attempt) * time.Millisecond)
+        }
+        resp, err := p.http.Do(req)
+        if err != nil {
+            lastErr = err
+            continue
+        }
+        if resp.StatusCode == http.StatusOK {
+            return resp, nil
+        }
+        lastErr = fmt.Errorf("unexpected status %d from %s", resp.StatusCode, req.URL)
+        if resp.Body != nil {
+            resp.Body.Close()
+        }
+    }
+    if lastErr == nil {
+        lastErr = errors.New("request failed")
+    }
+    return nil, lastErr
+}
diff --git a/push-validator-manager/internal/statesync/provider_fallback_test.go b/push-validator-manager/internal/statesync/provider_fallback_test.go
new file mode 100644
index 00000000..7825adea
--- /dev/null
+++ b/push-validator-manager/internal/statesync/provider_fallback_test.go
@@ -0,0 +1,77 @@
+package statesync
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+// TestProvider_ComputeTrust_Fallbacks exercises candidate-height fallback and
+// the /commit endpoint fallback.
+//
+// ComputeTrust probes snapshot-aligned heights (latest/1000 - i) * 1000 for
+// i=1..5, i.e. 4000, 3000, ... for latest=5000. The original test expected
+// height 4750, which is never probed by that scheme (and the /block default
+// branch returned a valid hash at 3000 first), so it could not pass. This
+// version simulates:
+//   - /block?height=4000 and /commit?height=4000 -> 404 (pruned height)
+//   - /block?height=3000 -> malformed JSON, forcing the /commit fallback
+//   - /commit?height=3000 -> success with hash "def456"
+func TestProvider_ComputeTrust_Fallbacks(t *testing.T) {
+    // Some sandboxes restrict binding; detect and skip.
+    probe, err := net.Listen("tcp", "127.0.0.1:0")
+    if err != nil {
+        t.Skipf("skipping: cannot bind due to sandbox: %v", err)
+    }
+    probe.Close()
+
+    mux := http.NewServeMux()
+    // Latest height via /status
+    mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
+        resp := map[string]any{"result": map[string]any{"sync_info": map[string]any{"latest_block_height": "5000"}}}
+        _ = json.NewEncoder(w).Encode(resp)
+    })
+    mux.HandleFunc("/block", func(w http.ResponseWriter, r *http.Request) {
+        switch r.URL.Query().Get("height") {
+        case "4000":
+            // First candidate: simulate pruned height.
+            http.NotFound(w, r)
+        case "3000":
+            // Second candidate: malformed body to force /commit fallback.
+            _, _ = w.Write([]byte("{bad json"))
+        default:
+            // No other heights should succeed; keep them failing so the
+            // assertion below is unambiguous.
+            http.NotFound(w, r)
+        }
+    })
+    mux.HandleFunc("/commit", func(w http.ResponseWriter, r *http.Request) {
+        if r.URL.Query().Get("height") == "3000" {
+            resp := map[string]any{
+                "result": map[string]any{
+                    "signed_header": map[string]any{
+                        "commit": map[string]any{
+                            "block_id": map[string]any{"hash": "def456"},
+                        },
+                    },
+                },
+            }
+            _ = json.NewEncoder(w).Encode(resp)
+            return
+        }
+        http.NotFound(w, r)
+    })
+    srv := httptest.NewServer(mux)
+    defer srv.Close()
+
+    p := New()
+    tp, err := p.ComputeTrust(context.Background(), srv.URL)
+    if err != nil {
+        t.Fatalf("unexpected error: %v", err)
+    }
+    if tp.Height != 3000 {
+        t.Fatalf("expected fallback trust height 3000, got %d", tp.Height)
+    }
+    if tp.Hash != "DEF456" {
+        t.Fatalf("expected hash DEF456, got %s", tp.Hash)
+    }
+}
diff --git a/push-validator-manager/internal/statesync/provider_test.go b/push-validator-manager/internal/statesync/provider_test.go
new file mode 100644
index 00000000..7f0f2c67
--- /dev/null
+++ b/push-validator-manager/internal/statesync/provider_test.go
@@ -0,0 +1,39 @@
+package statesync
+
+import (
+ "context"
+ "encoding/json"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+// TestProvider_ComputeTrust checks the happy path: latest height is read via
+// the /block fallback (no /status handler is registered) and the first
+// snapshot-aligned candidate (4000 for latest=5000) resolves via /block?height=.
+func TestProvider_ComputeTrust(t *testing.T) {
+    // Some sandboxes restrict binding; detect and skip.
+    probe, err := net.Listen("tcp", "127.0.0.1:0")
+    if err != nil {
+        t.Skipf("skipping: cannot bind due to sandbox: %v", err)
+    }
+    probe.Close()
+
+    mux := http.NewServeMux()
+    mux.HandleFunc("/block", func(w http.ResponseWriter, r *http.Request) {
+        if h := r.URL.Query().Get("height"); h != "" {
+            // Block hash for any explicitly requested height.
+            payload := map[string]any{"result": map[string]any{"block_id": map[string]any{"hash": "abc123"}}}
+            _ = json.NewEncoder(w).Encode(payload)
+            return
+        }
+        // Latest height = 5000
+        payload := map[string]any{"result": map[string]any{"block": map[string]any{"header": map[string]any{"height": "5000"}}}}
+        _ = json.NewEncoder(w).Encode(payload)
+    })
+    srv := httptest.NewServer(mux)
+    defer srv.Close()
+
+    tp, err := New().ComputeTrust(context.Background(), srv.URL)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if got, want := tp.Height, int64(4000); got != want {
+        t.Fatalf("trust height: got %d want %d", got, want)
+    }
+    if tp.Hash != "ABC123" {
+        t.Fatalf("trust hash uppercased: got %s", tp.Hash)
+    }
+}
diff --git a/push-validator-manager/internal/sync/monitor.go b/push-validator-manager/internal/sync/monitor.go
new file mode 100644
index 00000000..f157b91f
--- /dev/null
+++ b/push-validator-manager/internal/sync/monitor.go
@@ -0,0 +1,1020 @@
+package syncmon
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/node"
+)
+
+// Options configures sync monitoring (Run): RPC endpoints, log tailing, and
+// output/refresh behavior.
+type Options struct {
+ LocalRPC string // local node RPC base URL; defaults to http://127.0.0.1:26657 when empty
+ RemoteRPC string // remote RPC used as the sync target (denominator); falls back to LocalRPC
+ LogPath string // node log file tailed for state-sync events; empty disables the phase-1 tailer
+ Window int // number of recent height samples kept for rate/ETA; defaults to 30
+ Compact bool // NOTE(review): not referenced in the visible code — confirm intended use
+ Out io.Writer // default os.Stdout
+ Interval time.Duration // refresh interval for progress updates
+ Quiet bool // minimal, non-emoji, non-TTY style output
+ Debug bool // extra diagnostic prints
+ StuckTimeout time.Duration // timeout for detecting stalled sync
+}
+
+// pt is a (height, timestamp) sample used for moving-rate and ETA calculation.
+type pt struct {
+ h int64 // observed block height
+ t time.Time // when the height was observed
+}
+
+// defaultStuckTimeout is applied when Options.StuckTimeout is unset.
+const defaultStuckTimeout = 2 * time.Minute
+
+// ErrSyncStuck is returned when no sync progress is observed within StuckTimeout.
+var ErrSyncStuck = errors.New("sync stuck: no progress detected")
+
+// Run performs two-phase monitoring: snapshot spinner from logs, then WS header progress.
+func Run(ctx context.Context, opts Options) error {
+ if opts.Out == nil {
+ opts.Out = os.Stdout
+ }
+ if opts.Window <= 0 {
+ opts.Window = 30
+ }
+ if opts.StuckTimeout <= 0 {
+ opts.StuckTimeout = defaultStuckTimeout
+ }
+ lastProgress := newAtomicTime(time.Now())
+
+ tty := isTTY()
+ if opts.Quiet {
+ tty = false
+ }
+ hideCursor(opts.Out, tty)
+ defer showCursor(opts.Out, tty)
+
+ if tty && stdinIsTTY() {
+ go swallowEnter(opts.Out)
+ }
+
+ // Start log tailer if log path provided
+ snapCh := make(chan string, 16)
+ stopLog := make(chan struct{})
+ if opts.LogPath != "" {
+ go tailStatesync(ctx, opts.LogPath, snapCh, stopLog)
+ } else {
+ close(snapCh)
+ }
+
+ // Phase 1: snapshot progress indicator until acceptance/quiet
+ phase1Done := make(chan struct{})
+ var phase1Err error
+ var sawSnapshot bool
+ var sawAccepted bool
+ go func() {
+ defer close(phase1Done)
+ lastEvent := time.Now()
+ sawSnapshot = false
+ ticker := time.NewTicker(120 * time.Millisecond)
+ defer ticker.Stop()
+
+ steps := []string{
+ "Discovering snapshots",
+ "Downloading snapshot",
+ "Restoring database",
+ "Verifying and completing",
+ }
+ currentStep := 1
+ maxStep := len(steps)
+ printed := make([]bool, maxStep)
+ completed := make([]bool, maxStep)
+
+ spinnerFrames := []rune{'โ ', 'โ ', 'โ น', 'โ ธ', 'โ ผ', 'โ ด', 'โ ฆ', 'โ ง', 'โ ', 'โ '}
+ spinnerIndex := 0
+
+ printStep := func(idx int, done bool) {
+ if idx < 0 || idx >= maxStep {
+ return
+ }
+ if done {
+ if completed[idx] {
+ return
+ }
+ completed[idx] = true
+ } else if printed[idx] && !completed[idx] {
+ return
+ }
+ line := renderStepIndicator(idx+1, maxStep, steps[idx], opts.Quiet, done)
+ if done || !tty {
+ if done && tty {
+ // Clear spinner line before printing completed step
+ fmt.Fprintf(opts.Out, "\r\033[K%s\n", line)
+ } else {
+ fmt.Fprintln(opts.Out, line)
+ }
+ printed[idx] = true
+ } else {
+ fmt.Fprintf(opts.Out, "\r\033[K%s %c", line, spinnerFrames[spinnerIndex])
+ printed[idx] = true
+ }
+ }
+
+ printStep(currentStep-1, false)
+
+ for {
+ select {
+ case <-ctx.Done():
+ phase1Err = ctx.Err()
+ return
+ case line, ok := <-snapCh:
+ if !ok {
+ return
+ }
+ low := strings.ToLower(line)
+ if strings.Contains(low, "state sync failed") || strings.Contains(low, "state sync aborted") {
+ phase1Err = fmt.Errorf("state sync failed: %s", strings.TrimSpace(line))
+ return
+ }
+ switch {
+ case strings.Contains(low, "discovering snapshots"):
+ currentStep = 1
+ case strings.Contains(low, "fetching snapshot chunk"):
+ currentStep = 2
+ case strings.Contains(low, "applied snapshot chunk") || strings.Contains(low, "restoring"):
+ currentStep = 3
+ case strings.Contains(low, "snapshot accepted") ||
+ strings.Contains(low, "snapshot restored") ||
+ strings.Contains(low, "restored snapshot") ||
+ strings.Contains(low, "switching to blocksync") ||
+ strings.Contains(low, "switching to block sync"):
+ currentStep = 4
+ sawAccepted = true
+ }
+ if strings.Contains(low, "statesync") || strings.Contains(low, "state sync") || strings.Contains(low, "snapshot") {
+ lastEvent = time.Now()
+ lastProgress.Update()
+ sawSnapshot = true
+ }
+ if currentStep < 1 {
+ currentStep = 1
+ }
+ if currentStep > maxStep {
+ currentStep = maxStep
+ }
+ for i := 0; i < currentStep-1; i++ {
+ printStep(i, true)
+ }
+ printStep(currentStep-1, sawAccepted && currentStep == maxStep)
+ case <-ticker.C:
+ if opts.StuckTimeout > 0 && lastProgress.Since() > opts.StuckTimeout {
+ phase1Err = ErrSyncStuck
+ return
+ }
+ if !completed[currentStep-1] {
+ spinnerIndex = (spinnerIndex + 1) % len(spinnerFrames)
+ line := renderStepIndicator(currentStep, maxStep, steps[currentStep-1], opts.Quiet, false)
+ if tty {
+ fmt.Fprintf(opts.Out, "\r\033[K%s %c", line, spinnerFrames[spinnerIndex])
+ }
+ }
+ // Smart completion: if Step 3 stuck with no new logs for 3s, check RPC
+ if currentStep == 3 && sawSnapshot && time.Since(lastEvent) > 3*time.Second {
+ if isSyncedQuick(opts.LocalRPC) {
+ currentStep = maxStep
+ sawAccepted = true
+ for i := 0; i < maxStep; i++ {
+ printStep(i, true)
+ }
+ return
+ }
+ }
+ if sawAccepted && time.Since(lastEvent) > 5*time.Second {
+ for i := 0; i < maxStep; i++ {
+ printStep(i, true)
+ }
+ return
+ }
+ if !sawSnapshot {
+ if isSyncedQuick(opts.LocalRPC) {
+ for i := 0; i < maxStep; i++ {
+ printStep(i, true)
+ }
+ return
+ }
+ } else if isSyncedQuick(opts.LocalRPC) {
+ currentStep = maxStep
+ for i := 0; i < maxStep; i++ {
+ printStep(i, true)
+ }
+ return
+ }
+ }
+ }
+ }()
+
+ // Wait for phase 1 to complete or error
+ <-phase1Done
+ close(stopLog)
+ if phase1Err != nil {
+ return phase1Err
+ }
+ if sawAccepted {
+ fmt.Fprintln(opts.Out, "")
+ fmt.Fprintln(opts.Out, " \033[92mโ\033[0m Snapshot restored. Switching to block sync...")
+ }
+ lastProgress.Update()
+
+ // Phase 2: WS header subscription + progress bar
+ local := strings.TrimRight(opts.LocalRPC, "/")
+ if local == "" {
+ local = "http://127.0.0.1:26657"
+ }
+ hostport := hostPortFromURL(local)
+ // Wait for RPC up to 60s
+ if !waitTCP(hostport, 60*time.Second) {
+ return fmt.Errorf("RPC not listening on %s", hostport)
+ }
+ cli := node.New(local)
+ headers, err := cli.SubscribeHeaders(ctx)
+ if err != nil {
+ return fmt.Errorf("ws subscribe: %w", err)
+ }
+
+ // Remote (denominator) via WebSocket headers
+ remote := strings.TrimRight(opts.RemoteRPC, "/")
+ if remote == "" {
+ remote = local
+ }
+ remoteCli := node.New(remote)
+ remoteHeaders, remoteWSErr := remoteCli.SubscribeHeaders(ctx)
+
+ buf := make([]pt, 0, opts.Window)
+ var lastRemote int64
+ var baseH int64
+ var baseRemote int64
+ var barPrinted bool
+ var firstBarTime time.Time
+ var holdStarted bool
+ var lastPeers int
+ var lastLatency int64
+ var lastMetricsAt time.Time
+ // minimum time to show the bar even if already synced
+ const minShow = 15 * time.Second
+ // Print initial line to claim space
+ if tty {
+ fmt.Fprint(opts.Out, "\r")
+ }
+ iv := opts.Interval
+ if iv <= 0 {
+ iv = 1 * time.Second
+ }
+ tick := time.NewTicker(iv)
+ defer tick.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case rhd, ok := <-remoteHeaders:
+ if remoteWSErr == nil && ok {
+ lastProgress.Update()
+ lastRemote = rhd.Height
+ var cur int64
+ if len(buf) > 0 {
+ cur = buf[len(buf)-1].h
+ }
+ if cur == 0 {
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 800*time.Millisecond)
+ st, err := cli.Status(ctx2)
+ cancel2()
+ if err == nil {
+ cur = st.Height
+ }
+ }
+ if cur > 0 && baseH != 0 {
+ percent := 0.0
+ if lastRemote > 0 {
+ percent = float64(cur) / float64(lastRemote) * 100
+ }
+ percent = floor2(percent)
+ if cur < lastRemote && percent >= 100 {
+ percent = 99.99
+ }
+ line := renderProgressWithQuiet(percent, cur, lastRemote, opts.Quiet)
+ rate, eta := progressRateAndETA(buf, cur, lastRemote)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K %s%s", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, " height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, lastRemote, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, " %s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, lastRemote, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ if !barPrinted {
+ fmt.Fprintln(opts.Out, "")
+ firstBarTime = time.Now()
+ holdStarted = true
+ barPrinted = true
+ }
+ }
+ }
+ case h, ok := <-headers:
+ if !ok {
+ return nil
+ }
+ lastProgress.Update()
+ buf = append(buf, pt{h.Height, time.Now()})
+ if len(buf) > opts.Window {
+ buf = buf[1:]
+ }
+ // Render progress
+ var cur = h.Height
+ // Establish baseline once we know remote height and have at least one header
+ if baseH == 0 && lastRemote > 0 && len(buf) > 0 {
+ baseH = buf[0].h
+ baseRemote = lastRemote
+ if baseRemote <= baseH {
+ baseRemote = baseH + 1000
+ }
+ }
+ var percent float64
+ if lastRemote > 0 {
+ // Use baseline calculation only when there's meaningful progress to track
+ if baseH > 0 && lastRemote > baseH && (lastRemote-baseH) > 100 {
+ denom := float64(lastRemote - baseH)
+ if denom > 0 {
+ percent = float64(cur-baseH) / denom * 100
+ }
+ } else {
+ // Direct calculation for already-synced or near-synced nodes
+ percent = float64(cur) / float64(lastRemote) * 100
+ }
+ }
+ // Avoid rounding up to 100.00 before actually matching remote
+ percent = floor2(percent)
+ if cur < lastRemote && percent >= 100 {
+ percent = 99.99
+ }
+ // Compute moving rate from recent headers and derive ETA string.
+ rate, eta := progressRateAndETA(buf, cur, lastRemote)
+ // Periodically refresh peers and remote latency (every ~5s)
+ if time.Since(lastMetricsAt) > 5*time.Second {
+ lastMetricsAt = time.Now()
+ ctxp, cancelp := context.WithTimeout(context.Background(), 1200*time.Millisecond)
+ if plist, err := cli.Peers(ctxp); err == nil {
+ lastPeers = len(plist)
+ }
+ cancelp()
+ t0 := time.Now()
+ ctxl, cancell := context.WithTimeout(context.Background(), 1200*time.Millisecond)
+ _, _ = remoteCli.RemoteStatus(ctxl, remote)
+ cancell()
+ lastLatency = time.Since(t0).Milliseconds()
+ }
+ // Only render the bar once baseline exists
+ if baseH == 0 {
+ break
+ }
+ line := renderProgressWithQuiet(percent, cur, lastRemote, opts.Quiet)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K%s%s", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, "height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, lastRemote, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, "%s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, lastRemote, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ if !barPrinted {
+ fmt.Fprintln(opts.Out, "")
+ firstBarTime = time.Now()
+ holdStarted = true
+ }
+ barPrinted = true
+ case <-tick.C:
+ if opts.StuckTimeout > 0 && lastProgress.Since() > opts.StuckTimeout {
+ if tty {
+ fmt.Fprint(opts.Out, "\r\033[K")
+ }
+ return ErrSyncStuck
+ }
+ // Completion check via local status (cheap)
+ ctx2, cancel2 := context.WithTimeout(context.Background(), 1200*time.Millisecond)
+ st, err := cli.Status(ctx2)
+ cancel2()
+ if err == nil {
+ // If we haven't printed any bar yet (e.g., already synced), render a final bar once
+ if !barPrinted {
+ cur := st.Height
+ remoteH := lastRemote
+ if remoteH == 0 { // quick remote probe
+ remoteH = probeRemoteOnce(opts.RemoteRPC, cur)
+ }
+ if remoteH < cur {
+ remoteH = cur
+ }
+ // If already synced but local height not yet reported, align to remote
+ if !st.CatchingUp && cur < remoteH {
+ cur = remoteH
+ }
+ // Avoid printing a misleading bar when cur is 0; wait for actual height
+ if cur == 0 {
+ break
+ }
+ var percent float64
+ if remoteH > 0 {
+ percent = float64(cur) / float64(remoteH) * 100
+ }
+ percent = floor2(percent)
+ if cur < remoteH && percent >= 100 {
+ percent = 99.99
+ }
+ line := renderProgressWithQuiet(percent, cur, remoteH, opts.Quiet)
+ rate, eta := progressRateAndETA(buf, cur, remoteH)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K %s%s", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, "height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, remoteH, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, "%s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, remoteH, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ firstBarTime = time.Now()
+ holdStarted = true
+ barPrinted = true
+ }
+ // Active sync: update progress bar on every tick using current RPC status
+ if st.CatchingUp && barPrinted && baseH > 0 {
+ cur := st.Height
+ if cur > 0 {
+ // Add current height to buffer for rate calculation
+ buf = append(buf, pt{cur, time.Now()})
+ if len(buf) > opts.Window {
+ buf = buf[1:]
+ }
+ // Calculate progress using baseline logic
+ remoteH := lastRemote
+ if remoteH == 0 {
+ remoteH = probeRemoteOnce(opts.RemoteRPC, cur)
+ }
+ if remoteH < cur {
+ remoteH = cur
+ }
+ var percent float64
+ if remoteH > 0 {
+ // Use baseline calculation for meaningful progress tracking
+ if baseH > 0 && remoteH > baseH && (remoteH-baseH) > 100 {
+ denom := float64(remoteH - baseH)
+ if denom > 0 {
+ percent = float64(cur-baseH) / denom * 100
+ }
+ } else {
+ // Direct calculation for near-synced nodes
+ percent = float64(cur) / float64(remoteH) * 100
+ }
+ }
+ percent = floor2(percent)
+ if cur < remoteH && percent >= 100 {
+ percent = 99.99
+ }
+ // Render progress bar with current stats
+ line := renderProgressWithQuiet(percent, cur, remoteH, opts.Quiet)
+ rate, eta := progressRateAndETA(buf, cur, remoteH)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K %s%s", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, "height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, remoteH, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, "%s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, remoteH, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ }
+ }
+ // While within minShow and already not catching_up, keep the bar live-updating
+ if !st.CatchingUp && barPrinted && time.Since(firstBarTime) < minShow {
+ cur := st.Height
+ if cur == 0 && len(buf) > 0 {
+ cur = buf[len(buf)-1].h
+ }
+ remoteH := lastRemote
+ if remoteH == 0 {
+ remoteH = probeRemoteOnce(opts.RemoteRPC, cur)
+ }
+ if remoteH < cur {
+ remoteH = cur
+ }
+ percent := 0.0
+ if remoteH > 0 {
+ percent = float64(cur) / float64(remoteH) * 100
+ }
+ percent = floor2(percent)
+ if cur < remoteH && percent >= 100 {
+ percent = 99.99
+ }
+ line := renderProgressWithQuiet(percent, cur, remoteH, opts.Quiet)
+ rate, eta := progressRateAndETA(buf, cur, remoteH)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K %s%s", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, "height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, remoteH, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, "%s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, remoteH, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ continue
+ }
+ // End condition: catching_up is false AND minShow window has passed
+ if !st.CatchingUp && holdStarted && time.Since(firstBarTime) >= minShow {
+ cur := st.Height
+ remoteH := lastRemote
+ if remoteH == 0 {
+ remoteH = probeRemoteOnce(opts.RemoteRPC, cur)
+ }
+ if remoteH < cur {
+ remoteH = cur
+ }
+ percent := 0.0
+ if remoteH > 0 {
+ percent = float64(cur) / float64(remoteH) * 100
+ }
+ percent = floor2(percent)
+ if cur < remoteH && percent >= 100 {
+ percent = 99.99
+ }
+ line := renderProgressWithQuiet(percent, cur, remoteH, opts.Quiet)
+ rate, eta := progressRateAndETA(buf, cur, remoteH)
+ lineWithETA := line
+ if eta != "" {
+ lineWithETA += eta
+ }
+ if tty {
+ extra := ""
+ if lastPeers > 0 {
+ extra += fmt.Sprintf(" | peers: %d", lastPeers)
+ }
+ if lastLatency > 0 {
+ extra += fmt.Sprintf(" | rtt: %dms", lastLatency)
+ }
+ fmt.Fprintf(opts.Out, "\r\033[K%s%s\n", lineWithETA, extra)
+ } else {
+ if opts.Quiet {
+ fmt.Fprintf(opts.Out, "height=%d/%d rate=%.2f%s peers=%d rtt=%dms\n", cur, remoteH, rate, eta, lastPeers, lastLatency)
+ } else {
+ fmt.Fprintf(opts.Out, "%s height=%d/%d rate=%.2f blk/s%s peers=%d rtt=%dms\n", time.Now().Format(time.Kitchen), cur, remoteH, rate, eta, lastPeers, lastLatency)
+ }
+ }
+ return nil
+ }
+ }
+ }
+ }
+}
+
+// --- helpers ---
+
// atomicTime stores a time.Time as Unix nanoseconds so it can be read and
// written concurrently without a mutex.
type atomicTime struct {
	value atomic.Int64 // UnixNano of the stored time; 0 means "never set"
}

// newAtomicTime returns an atomicTime initialized to t.
func newAtomicTime(t time.Time) *atomicTime {
	at := &atomicTime{}
	at.Store(t)
	return at
}

// Store atomically records t (as Unix nanoseconds).
func (a *atomicTime) Store(t time.Time) {
	a.value.Store(t.UnixNano())
}

// Update atomically records the current time.
func (a *atomicTime) Update() {
	a.Store(time.Now())
}

// Since returns the elapsed time since the last Store/Update, or 0 when the
// value was never set (zero is used as the "unset" sentinel).
func (a *atomicTime) Since() time.Duration {
	last := a.value.Load()
	if last == 0 {
		return 0
	}
	return time.Since(time.Unix(0, last))
}
+
// renderStepIndicator renders a one-line step bar like " [●●○○] Step 2/4: msg".
// quiet switches to ASCII glyphs ('#'/'-'); completed appends a green check
// mark (skipped in quiet mode, which must stay escape-free).
// step and total are clamped so a caller can never produce a malformed bar.
func renderStepIndicator(step, total int, message string, quiet bool, completed bool) string {
	filled := "●"
	empty := "○"
	if quiet {
		filled = "#"
		empty = "-"
	}
	if total <= 0 {
		total = 1
	}
	if step < 1 {
		step = 1
	}
	if step > total {
		step = total
	}
	var sb strings.Builder
	// Grow by bytes, not glyph count: "●"/"○" are 3 bytes each in UTF-8, so
	// the previous Grow(total) under-reserved and forced a regrow.
	sb.Grow(total * len(filled))
	for i := 1; i <= total; i++ {
		if i <= step {
			sb.WriteString(filled)
		} else {
			sb.WriteString(empty)
		}
	}
	suffix := message
	if completed && !quiet {
		suffix = fmt.Sprintf("%s \033[92m%s\033[0m", message, "✓")
	}
	return fmt.Sprintf(" [%s] Step %d/%d: %s", sb.String(), step, total, suffix)
}
+
+func tailStatesync(ctx context.Context, path string, out chan<- string, stop <-chan struct{}) {
+ defer close(out)
+ // Wait for log file to appear to avoid missing early snapshot lines
+ for {
+ if _, err := os.Stat(path); err == nil {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case <-stop:
+ return
+ case <-time.After(300 * time.Millisecond):
+ }
+ }
+ f, err := os.Open(path)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+ // Seek to end
+ if _, err := f.Seek(0, io.SeekEnd); err != nil {
+ return
+ }
+ r := bufio.NewReader(f)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-stop:
+ return
+ default:
+ }
+ line, err := r.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ return
+ }
+ // Only forward relevant lines to reduce chatter
+ low := strings.ToLower(line)
+ if strings.Contains(low, "statesync") || strings.Contains(low, "state sync") || strings.Contains(low, "snapshot") {
+ out <- strings.TrimSpace(line)
+ }
+ }
+}
+
// hostPortFromURL extracts the host:port component of s, falling back to the
// default local RPC endpoint when s is not a parseable URL with a host part.
func hostPortFromURL(s string) string {
	const defaultHostPort = "127.0.0.1:26657"
	u, err := url.Parse(s)
	if err != nil || u.Host == "" {
		return defaultHostPort
	}
	return u.Host
}
+
+// isSyncedQuick checks local RPC catching_up with a tiny timeout.
+func isSyncedQuick(local string) bool {
+ local = strings.TrimRight(local, "/")
+ if local == "" {
+ local = "http://127.0.0.1:26657"
+ }
+ httpc := &http.Client{Timeout: 1200 * time.Millisecond}
+ ctx, cancel := context.WithTimeout(context.Background(), 1200*time.Millisecond)
+ defer cancel()
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, local+"/status", nil)
+ resp, err := httpc.Do(req)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+ var payload struct {
+ Result struct {
+ SyncInfo struct {
+ CatchingUp bool `json:"catching_up"`
+ } `json:"sync_info"`
+ } `json:"result"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
+ return false
+ }
+ return !payload.Result.SyncInfo.CatchingUp
+}
+
+func waitTCP(hostport string, d time.Duration) bool {
+ deadline := time.Now().Add(d)
+ for time.Now().Before(deadline) {
+ conn, err := (&net.Dialer{Timeout: 750 * time.Millisecond}).Dial("tcp", hostport)
+ if err == nil {
+ _ = conn.Close()
+ return true
+ }
+ time.Sleep(750 * time.Millisecond)
+ }
+ return false
+}
+
+func pollRemote(ctx context.Context, base string, every time.Duration, out chan<- int64) {
+ defer close(out)
+ httpc := &http.Client{Timeout: 2 * time.Second}
+ base = strings.TrimRight(base, "/")
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(every):
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/status", nil)
+ resp, err := httpc.Do(req)
+ if err != nil {
+ continue
+ }
+ var payload struct {
+ Result struct {
+ SyncInfo struct {
+ Height string `json:"latest_block_height"`
+ } `json:"sync_info"`
+ } `json:"result"`
+ }
+ _ = json.NewDecoder(resp.Body).Decode(&payload)
+ _ = resp.Body.Close()
+ if payload.Result.SyncInfo.Height != "" {
+ hv, _ := strconvParseInt(payload.Result.SyncInfo.Height)
+ if hv > 0 {
+ select {
+ case out <- hv:
+ default:
+ }
+ }
+ }
+ }
+ }
+}
+
+// probeRemoteOnce fetches a single remote height with a small timeout.
+func probeRemoteOnce(base string, fallback int64) int64 {
+ base = strings.TrimRight(base, "/")
+ if base == "" {
+ return fallback
+ }
+ httpc := &http.Client{Timeout: 1200 * time.Millisecond}
+ ctx, cancel := context.WithTimeout(context.Background(), 1200*time.Millisecond)
+ defer cancel()
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, base+"/status", nil)
+ resp, err := httpc.Do(req)
+ if err != nil {
+ return fallback
+ }
+ defer resp.Body.Close()
+ var payload struct {
+ Result struct {
+ SyncInfo struct {
+ Height string `json:"latest_block_height"`
+ } `json:"sync_info"`
+ } `json:"result"`
+ }
+ if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
+ return fallback
+ }
+ h, _ := strconvParseInt(payload.Result.SyncInfo.Height)
+ if h <= 0 {
+ return fallback
+ }
+ return h
+}
+
+func movingRate(buf []struct {
+ h int64
+ t time.Time
+}) float64 {
+ n := len(buf)
+ if n < 2 {
+ return 0
+ }
+ dh := float64(buf[n-1].h - buf[0].h)
+ dt := buf[n-1].t.Sub(buf[0].t).Seconds()
+ if dt <= 0 {
+ return 0
+ }
+ return dh / dt
+}
+
+func progressRateAndETA(buf []pt, cur, remote int64) (float64, string) {
+ rate := movingRatePt(buf)
+ if rate <= 0 || math.IsNaN(rate) || math.IsInf(rate, 0) {
+ rate = 1.0
+ }
+ eta := ""
+ if remote > cur && rate > 0 {
+ rem := float64(remote-cur) / rate
+ if rem < 0 {
+ rem = 0
+ }
+ eta = fmt.Sprintf(" | ETA: %s", (time.Duration(rem * float64(time.Second))).Round(time.Second))
+ } else if remote > 0 {
+ eta = " | ETA: 0s"
+ }
+ return rate, eta
+}
+
+func renderProgress(percent float64, cur, remote int64) string {
+ width := 28
+ if percent < 0 {
+ percent = 0
+ }
+ if percent > 100 {
+ percent = 100
+ }
+ filled := int(percent / 100 * float64(width))
+ if filled > width {
+ filled = width
+ }
+ bar := strings.Repeat("โ", filled) + strings.Repeat("โ", width-filled)
+ return fmt.Sprintf("๐ Syncing [%s] %.2f%% | %d/%d blocks", bar, percent, cur, remote)
+}
+
+func renderProgressWithQuiet(percent float64, cur, remote int64, quiet bool) string {
+ if quiet {
+ width := 28
+ if percent < 0 {
+ percent = 0
+ }
+ if percent > 100 {
+ percent = 100
+ }
+ filled := int(percent / 100 * float64(width))
+ if filled < 0 {
+ filled = 0
+ }
+ if filled > width {
+ filled = width
+ }
+ bar := strings.Repeat("#", filled) + strings.Repeat("-", width-filled)
+ return fmt.Sprintf("[%s] %.2f%% | %d/%d", bar, percent, cur, remote)
+ }
+ return renderProgress(percent, cur, remote)
+}
+
// stdinIsTTY reports whether stdin is attached to a character device
// (an interactive terminal rather than a pipe or a file).
func stdinIsTTY() bool {
	info, err := os.Stdin.Stat()
	if err != nil {
		return false
	}
	return info.Mode()&os.ModeCharDevice != 0
}
+
// swallowEnter consumes stray Enter presses on stdin while a progress line is
// being redrawn, erasing the blank line each keypress would otherwise leave.
// It blocks reading stdin and returns on any read error (e.g. stdin closed).
func swallowEnter(out io.Writer) {
	in := bufio.NewReader(os.Stdin)
	for {
		ch, _, err := in.ReadRune()
		if err != nil {
			return
		}
		if ch != '\n' && ch != '\r' {
			continue
		}
		// Return to column 0 and clear the line the keypress produced,
		// without moving the cursor up.
		fmt.Fprint(out, "\r\x1b[K")
	}
}
+
// isTTY reports whether stdout is an interactive terminal: a character device
// with a non-empty TERM, so redirected output and TERM-less environments get
// plain, escape-free rendering.
func isTTY() bool {
	info, err := os.Stdout.Stat()
	if err != nil {
		return false
	}
	if info.Mode()&os.ModeCharDevice == 0 {
		return false
	}
	return os.Getenv("TERM") != ""
}
+
+func hideCursor(w io.Writer, tty bool) {
+ if tty {
+ fmt.Fprint(w, "\x1b[?25l")
+ }
+}
+func showCursor(w io.Writer, tty bool) {
+ if tty {
+ fmt.Fprint(w, "\x1b[?25h")
+ }
+}
+
// strconvParseInt is a minimal local replacement for strconv.ParseInt(s, 10, 64)
// kept to avoid an extra import. It accepts an optional leading '-' followed
// by one or more ASCII digits. Note: it does not detect int64 overflow; the
// only callers feed it block heights, which fit comfortably.
func strconvParseInt(s string) (int64, error) {
	if s == "" {
		return 0, fmt.Errorf("empty")
	}
	var sign int64 = 1
	if s[0] == '-' {
		sign = -1
		s = s[1:]
		// A bare "-" has no digits and must be rejected; the previous
		// version silently parsed it as 0 with a nil error.
		if s == "" {
			return 0, fmt.Errorf("invalid")
		}
	}
	var n int64
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			return 0, fmt.Errorf("invalid")
		}
		n = n*10 + int64(c-'0')
	}
	return sign * n, nil
}
+
// floor1 returns v floored to one decimal place.
func floor1(v float64) float64 {
	return math.Floor(v*10.0) / 10.0
}

// floor2 returns v floored to two decimal places.
func floor2(v float64) float64 {
	return math.Floor(v*100.0) / 100.0
}
+
+// convert helper to reuse movingRate signature
+func movingRatePt(in []pt) float64 {
+ tmp := make([]struct {
+ h int64
+ t time.Time
+ }, len(in))
+ for i := range in {
+ tmp[i] = struct {
+ h int64
+ t time.Time
+ }{h: in[i].h, t: in[i].t}
+ }
+ return movingRate(tmp)
+}
diff --git a/push-validator-manager/internal/sync/monitor_test.go b/push-validator-manager/internal/sync/monitor_test.go
new file mode 100644
index 00000000..491a985c
--- /dev/null
+++ b/push-validator-manager/internal/sync/monitor_test.go
@@ -0,0 +1,60 @@
+package syncmon
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+ "net"
+)
+
+func TestRenderProgress_FloorClamp(t *testing.T) {
+ // 99.97% should floor to 99.9, not 100.0
+ cur, remote := int64(9997), int64(10000)
+ percent := float64(cur) / float64(remote) * 100
+ percent = floor1(percent)
+ if percent >= 100.0 { t.Fatalf("percent should not be 100, got %.1f", percent) }
+}
+
+func TestIsSyncedQuick(t *testing.T) {
+ // skip in sandbox environments that restrict binding
+ if _, err := net.Listen("tcp", "127.0.0.1:0"); err != nil {
+ t.Skip("skipping due to sandbox")
+ }
+ mux := http.NewServeMux()
+ mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte(`{"result":{"sync_info":{"catching_up":false}}}`)) })
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+ if !isSyncedQuick(srv.URL) { t.Fatal("expected synced true") }
+}
+
+func TestTailStatesync_FiltersAndSignals(t *testing.T) {
+ dir := t.TempDir()
+ logPath := filepath.Join(dir, "pchaind.log")
+ f, err := os.Create(logPath)
+ if err != nil { t.Fatal(err) }
+ defer f.Close()
+ ch := make(chan string, 4)
+ stop := make(chan struct{})
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go tailStatesync(ctx, logPath, ch, stop)
+ // Write non-matching line
+ time.Sleep(50 * time.Millisecond)
+ f.WriteString("random line\n")
+ f.Sync()
+ // Write matching snapshot line
+ f.WriteString("Snapshot accepted, restoring...\n")
+ f.Sync()
+ // Expect to receive the filtered line
+ select {
+ case s := <-ch:
+ if s == "" { t.Fatal("empty line") }
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout waiting for tail")
+ }
+ close(stop)
+}
diff --git a/push-validator-manager/internal/system/setup.go b/push-validator-manager/internal/system/setup.go
new file mode 100644
index 00000000..a3a5e55a
--- /dev/null
+++ b/push-validator-manager/internal/system/setup.go
@@ -0,0 +1,21 @@
+package system
+
+// Guided system setup for nginx/logrotate. No automatic package installs.
+// Implementers should prompt for sudo if needed and generate configs idempotently.
+
// NginxConfig describes the desired nginx reverse-proxy front-end for the
// node's RPC endpoint.
type NginxConfig struct {
	ServerName string // server_name for the nginx vhost
	RPCPort    int    // local RPC port to proxy to
	WS         bool   // whether to also proxy websocket upgrades
}

// SetupNginx generates/installs the nginx site described by cfg.
// Not yet implemented: currently a no-op that always returns nil.
func SetupNginx(cfg NginxConfig) error { return nil }
+
// LogrotateConfig describes the rotation policy for the node log file.
type LogrotateConfig struct {
	LogPath string // absolute path of the log file to rotate
	Rotate  int    // number of rotated files to keep
	SizeMB  int    // rotate once the log exceeds this many megabytes
}

// SetupLogrotate installs a logrotate rule described by cfg.
// Not yet implemented: currently a no-op that always returns nil.
func SetupLogrotate(cfg LogrotateConfig) error { return nil }
+
diff --git a/push-validator-manager/internal/ui/colors.go b/push-validator-manager/internal/ui/colors.go
new file mode 100644
index 00000000..e9f9e975
--- /dev/null
+++ b/push-validator-manager/internal/ui/colors.go
@@ -0,0 +1,307 @@
+package ui
+
import (
	"fmt"
	"os"
	"strings"
	"unicode/utf8"
)
+
// ANSI SGR escape codes for terminal output. These are raw sequences;
// gate their use behind ColorConfig.Enabled so NO_COLOR and dumb
// terminals receive plain text.
const (
	Reset     = "\033[0m"
	Bold      = "\033[1m"
	Dim       = "\033[2m"
	Italic    = "\033[3m"
	Underline = "\033[4m"

	// Primary colors (30-37)
	Black   = "\033[30m"
	Red     = "\033[31m"
	Green   = "\033[32m"
	Yellow  = "\033[33m"
	Blue    = "\033[34m"
	Magenta = "\033[35m"
	Cyan    = "\033[36m"
	White   = "\033[37m"

	// Bright colors (90-97)
	BrightBlack   = "\033[90m"
	BrightRed     = "\033[91m"
	BrightGreen   = "\033[92m"
	BrightYellow  = "\033[93m"
	BrightBlue    = "\033[94m"
	BrightMagenta = "\033[95m"
	BrightCyan    = "\033[96m"
	BrightWhite   = "\033[97m"

	// Background colors (40-47)
	BgBlack   = "\033[40m"
	BgRed     = "\033[41m"
	BgGreen   = "\033[42m"
	BgYellow  = "\033[43m"
	BgBlue    = "\033[44m"
	BgMagenta = "\033[45m"
	BgCyan    = "\033[46m"
	BgWhite   = "\033[47m"
)
+
// Theme defines the color scheme for different UI elements. Each field holds
// a raw ANSI prefix (possibly a combination such as Bold+Cyan); an empty
// string means "use the terminal's default color".
type Theme struct {
	// Status indicators
	Success string
	Warning string
	Error   string
	Info    string

	// UI elements
	Header      string
	SubHeader   string
	Label       string
	Value       string
	Command     string
	Flag        string
	Description string
	Separator   string

	// Interactive elements
	Prompt    string
	Input     string
	Selection string

	// Progress indicators
	Progress string
	Complete string
	Pending  string

	// Special elements
	Logo      string
	Version   string
	Timestamp string
}
+
// DefaultTheme returns the default color theme used when no overrides apply.
func DefaultTheme() *Theme {
	return &Theme{
		// Status indicators - Clear semantic colors
		Success: BrightGreen,
		Warning: BrightYellow,
		Error:   BrightRed,
		Info:    BrightCyan,

		// UI elements - Professional and readable
		Header:      Bold + BrightCyan,
		SubHeader:   Bold + Cyan,
		Label:       Bold, // Bold + terminal default color for visibility on all backgrounds
		Value:       "",   // Use terminal default foreground color for best contrast
		Command:     BrightGreen,
		Flag:        BrightYellow,
		Description: BrightBlack,
		Separator:   BrightBlack,

		// Interactive elements
		Prompt:    Bold + BrightMagenta,
		Input:     BrightWhite,
		Selection: Bold + BrightCyan,

		// Progress indicators
		Progress: BrightYellow,
		Complete: BrightGreen,
		Pending:  BrightBlack,

		// Special elements
		Logo:      Bold + BrightMagenta,
		Version:   BrightBlack,
		Timestamp: BrightBlack,
	}
}
+
// ColorConfig manages color output settings for a single output stream.
type ColorConfig struct {
	Enabled      bool   // emit ANSI escapes; false yields plain text
	EmojiEnabled bool   // emit emoji glyphs; false yields ASCII fallbacks
	Theme        *Theme // color palette; see DefaultTheme
}
+
+// NewColorConfig creates a new color configuration with default settings
+func NewColorConfig() *ColorConfig {
+ // Check if colors should be disabled
+ noColor := os.Getenv("NO_COLOR") != ""
+ term := os.Getenv("TERM")
+
+ // Disable colors if NO_COLOR is set or TERM is dumb
+ enabled := !noColor && term != "dumb" && term != ""
+
+ return &ColorConfig{
+ Enabled: enabled,
+ EmojiEnabled: true,
+ Theme: DefaultTheme(),
+ }
+}
+
+// Apply applies a color to text if colors are enabled
+func (c *ColorConfig) Apply(color, text string) string {
+ if !c.Enabled {
+ return text
+ }
+ return color + text + Reset
+}
+
// Success formats success messages.
func (c *ColorConfig) Success(text string) string {
	return c.Apply(c.Theme.Success, text)
}

// Warning formats warning messages.
func (c *ColorConfig) Warning(text string) string {
	return c.Apply(c.Theme.Warning, text)
}

// Error formats error messages.
func (c *ColorConfig) Error(text string) string {
	return c.Apply(c.Theme.Error, text)
}

// Info formats informational messages.
func (c *ColorConfig) Info(text string) string {
	return c.Apply(c.Theme.Info, text)
}

// Header formats top-level header text.
func (c *ColorConfig) Header(text string) string {
	return c.Apply(c.Theme.Header, text)
}

// SubHeader formats secondary header text.
func (c *ColorConfig) SubHeader(text string) string {
	return c.Apply(c.Theme.SubHeader, text)
}

// Label formats label text (e.g. the key in key/value output).
func (c *ColorConfig) Label(text string) string {
	return c.Apply(c.Theme.Label, text)
}

// Value formats value text (terminal default color by default).
func (c *ColorConfig) Value(text string) string {
	return c.Apply(c.Theme.Value, text)
}

// Command formats command names in help output.
func (c *ColorConfig) Command(text string) string {
	return c.Apply(c.Theme.Command, text)
}

// Flag formats flag names in help output.
func (c *ColorConfig) Flag(text string) string {
	return c.Apply(c.Theme.Flag, text)
}

// Description formats dimmed descriptive text.
func (c *ColorConfig) Description(text string) string {
	return c.Apply(c.Theme.Description, text)
}
+
// FormatKeyValue formats a "key: value" pair with themed label/value colors.
func (c *ColorConfig) FormatKeyValue(key, value string) string {
	return fmt.Sprintf("%s: %s", c.Label(key), c.Value(value))
}

// FormatCommand formats a command name with its description for help output.
func (c *ColorConfig) FormatCommand(cmd, desc string) string {
	return fmt.Sprintf(" %s %s", c.Command(cmd), c.Description(desc))
}

// FormatFlag formats a flag with its description for help output.
func (c *ColorConfig) FormatFlag(flag, desc string) string {
	return fmt.Sprintf(" %s %s", c.Flag(flag), c.Description(desc))
}
+
+// Separator returns a colored separator line
+func (c *ColorConfig) Separator(width int) string {
+ sep := strings.Repeat("โ", width)
+ return c.Apply(c.Theme.Separator, sep)
+}
+
+// Box creates a colored box around text
+func (c *ColorConfig) Box(text string, width int) string {
+ if !c.Enabled {
+ return text
+ }
+
+ topBorder := c.Apply(c.Theme.Separator, "โ" + strings.Repeat("โ", width-2) + "โ")
+ bottomBorder := c.Apply(c.Theme.Separator, "โ" + strings.Repeat("โ", width-2) + "โ")
+
+ lines := strings.Split(text, "\n")
+ var boxed []string
+ boxed = append(boxed, topBorder)
+
+ for _, line := range lines {
+ padding := width - len(line) - 4
+ if padding < 0 {
+ padding = 0
+ }
+ boxedLine := c.Apply(c.Theme.Separator, "โ ") + line + strings.Repeat(" ", padding) + c.Apply(c.Theme.Separator, " โ")
+ boxed = append(boxed, boxedLine)
+ }
+
+ boxed = append(boxed, bottomBorder)
+ return strings.Join(boxed, "\n")
+}
+
+// StatusIcon returns a colored status icon (respects emoji settings)
+func (c *ColorConfig) StatusIcon(status string) string {
+ if !c.EmojiEnabled {
+ switch strings.ToLower(status) {
+ case "success", "running", "active", "online":
+ return c.Success("[OK]")
+ case "warning", "syncing", "pending":
+ return c.Warning("[WARN]")
+ case "error", "failed", "stopped", "offline":
+ return c.Error("[ERR]")
+ case "info":
+ return c.Info("[INFO]")
+ default:
+ return c.Apply(c.Theme.Pending, "[ ]")
+ }
+ }
+
+ switch strings.ToLower(status) {
+ case "success", "running", "active", "online":
+ return c.Success("โ")
+ case "warning", "syncing", "pending":
+ return c.Warning("โ ")
+ case "error", "failed", "stopped", "offline":
+ return c.Error("โ")
+ case "info":
+ return c.Info("โน")
+ default:
+ return c.Apply(c.Theme.Pending, "โ")
+ }
+}
+
+// ProgressBar creates a colored progress bar
+func (c *ColorConfig) ProgressBar(percent float64, width int) string {
+ if width < 10 {
+ width = 10
+ }
+
+ filled := int(float64(width) * percent / 100)
+ if filled > width {
+ filled = width
+ }
+
+ bar := strings.Repeat("โ", filled) + strings.Repeat("โ", width-filled)
+
+ if percent >= 100 {
+ return c.Apply(c.Theme.Complete, bar)
+ } else if percent >= 50 {
+ return c.Apply(c.Theme.Progress, bar)
+ }
+ return c.Apply(c.Theme.Pending, bar)
+}
+
+// Spinner returns a colored spinner character for the given frame
+func (c *ColorConfig) Spinner(frame int) string {
+ spinners := []string{"โ ", "โ ", "โ น", "โ ธ", "โ ผ", "โ ด", "โ ฆ", "โ ง", "โ ", "โ "}
+ return c.Apply(c.Theme.Progress, spinners[frame%len(spinners)])
+}
\ No newline at end of file
diff --git a/push-validator-manager/internal/ui/errors.go b/push-validator-manager/internal/ui/errors.go
new file mode 100644
index 00000000..21eb7b0f
--- /dev/null
+++ b/push-validator-manager/internal/ui/errors.go
@@ -0,0 +1,69 @@
+package ui
+
+import (
+ "fmt"
+ "strings"
+)
+
// ErrorMessage represents a structured, actionable error to present to users.
// Empty fields/slices are simply omitted from the rendered output.
type ErrorMessage struct {
	Problem string   // one-line problem statement
	Causes  []string // possible causes
	Actions []string // actionable steps to resolve
	Hints   []string // optional hints (e.g., commands to try)
}
+
+// Format renders the error using the color theme. It does not include ANSI
+// codes when colors are disabled (NO_COLOR or dumb terminal).
+func (e ErrorMessage) Format(c *ColorConfig) string {
+ var b strings.Builder
+ // Header
+ b.WriteString(c.Error("โ "))
+ b.WriteString(c.Header("Error"))
+ b.WriteString("\n")
+ if e.Problem != "" {
+ b.WriteString(" ")
+ b.WriteString(c.Label("Problem"))
+ b.WriteString(": ")
+ b.WriteString(e.Problem)
+ b.WriteString("\n")
+ }
+ if len(e.Causes) > 0 {
+ b.WriteString(" ")
+ b.WriteString(c.Label("Possible causes"))
+ b.WriteString(":\n")
+ for _, it := range e.Causes {
+ b.WriteString(" โข ")
+ b.WriteString(it)
+ b.WriteString("\n")
+ }
+ }
+ if len(e.Actions) > 0 {
+ b.WriteString(" ")
+ b.WriteString(c.Label("Try"))
+ b.WriteString(":\n")
+ for _, it := range e.Actions {
+ b.WriteString(" โ ")
+ b.WriteString(it)
+ b.WriteString("\n")
+ }
+ }
+ if len(e.Hints) > 0 {
+ b.WriteString(" ")
+ b.WriteString(c.Label("Hints"))
+ b.WriteString(":\n")
+ for _, it := range e.Hints {
+ b.WriteString(" ยท ")
+ b.WriteString(c.Description(it))
+ b.WriteString("\n")
+ }
+ }
+ return b.String()
+}
+
+// PrintError prints the structured error to stdout using the current theme.
+func PrintError(e ErrorMessage) {
+ c := NewColorConfig()
+ fmt.Println(e.Format(c))
+}
+
diff --git a/push-validator-manager/internal/ui/format.go b/push-validator-manager/internal/ui/format.go
new file mode 100644
index 00000000..b8ebaf13
--- /dev/null
+++ b/push-validator-manager/internal/ui/format.go
@@ -0,0 +1,33 @@
+package ui
+
+import (
+ "fmt"
+ "strings"
+)
+
// FormatNumber formats an integer with thousands separators.
// Example: 1234567 -> "1,234,567".
// The sign is handled separately from the digits: the old reverse-and-insert
// approach placed a comma next to the minus sign for negative numbers whose
// leading group had fewer than three digits (e.g. -123 -> "-,123").
func FormatNumber(n int64) string {
	s := fmt.Sprintf("%d", n)
	sign := ""
	if strings.HasPrefix(s, "-") {
		sign, s = "-", s[1:]
	}
	if len(s) <= 3 {
		return sign + s
	}

	// Emit the (possibly short) leading group, then full groups of three.
	var b strings.Builder
	head := len(s) % 3
	if head > 0 {
		b.WriteString(s[:head])
	}
	for i := head; i < len(s); i += 3 {
		if b.Len() > 0 {
			b.WriteByte(',')
		}
		b.WriteString(s[i : i+3])
	}
	return sign + b.String()
}
+
// reverse returns s with its runes in reverse order (Unicode-safe).
func reverse(s string) string {
	src := []rune(s)
	out := make([]rune, len(src))
	for i, r := range src {
		out[len(src)-1-i] = r
	}
	return string(out)
}
diff --git a/push-validator-manager/internal/ui/global.go b/push-validator-manager/internal/ui/global.go
new file mode 100644
index 00000000..2244599b
--- /dev/null
+++ b/push-validator-manager/internal/ui/global.go
@@ -0,0 +1,42 @@
+package ui
+
// Global UI configuration for the application (set once at startup).
// Access is not synchronized: InitGlobal must run before goroutines read it.
var globalConfig = Config{}

// Config holds application-wide UI settings.
type Config struct {
	NoColor        bool // disable ANSI colors (see NewColorConfigFromGlobal)
	NoEmoji        bool // use ASCII fallbacks instead of emoji glyphs
	Yes            bool
	NonInteractive bool
	Verbose        bool
	Quiet          bool
	Debug          bool
}
+
// InitGlobal initializes the global UI configuration. Call once at startup,
// before any goroutine calls GetGlobal — the variable is not synchronized.
func InitGlobal(cfg Config) {
	globalConfig = cfg
}

// GetGlobal returns a copy of the global UI configuration.
func GetGlobal() Config {
	return globalConfig
}
+
+// NewColorConfigFromGlobal creates a ColorConfig using global settings
+func NewColorConfigFromGlobal() *ColorConfig {
+ cfg := GetGlobal()
+ c := NewColorConfig()
+ c.Enabled = c.Enabled && !cfg.NoColor
+ c.EmojiEnabled = c.EmojiEnabled && !cfg.NoEmoji
+ return c
+}
+
// NewPrinterFromGlobal creates a Printer for the given output format, wired
// to the globally configured color settings.
func NewPrinterFromGlobal(format string) Printer {
	return Printer{
		format: format,
		Colors: NewColorConfigFromGlobal(),
	}
}
diff --git a/push-validator-manager/internal/ui/logtui.go b/push-validator-manager/internal/ui/logtui.go
new file mode 100644
index 00000000..d13fd1fd
--- /dev/null
+++ b/push-validator-manager/internal/ui/logtui.go
@@ -0,0 +1,300 @@
+package ui
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "os/signal"
+ "strings"
+ "syscall"
+ "time"
+ "unicode"
+
+ "github.com/nxadm/tail"
+ "golang.org/x/term"
+)
+
// LogUIOptions configures the TUI log viewer.
type LogUIOptions struct {
	LogPath    string // Path to pchaind.log
	BgKey      byte   // Key to background/detach the viewer (default: 'b')
	ShowFooter bool   // Enable footer/controls banner (default: true)
	NoColor    bool   // Respect --no-color: emit no ANSI styling
}
+
+// RunLogUI starts the interactive log viewer with a sticky footer.
+// In TUI mode:
+// - Ctrl+C stops the node and exits
+// - BgKey (default 'b') detaches the viewer while keeping the node running
+//
+// Automatically falls back to plain tail for non-TTY environments.
+func RunLogUI(ctx context.Context, opts LogUIOptions) error {
+ debug := os.Getenv("DEBUG_TUI") != ""
+
+ // 1. TTY check
+ stdin := int(os.Stdin.Fd())
+ stdout := int(os.Stdout.Fd())
+ stdinTTY := term.IsTerminal(stdin)
+ stdoutTTY := term.IsTerminal(stdout)
+
+ if !stdinTTY || !stdoutTTY || !opts.ShowFooter {
+ if debug {
+ fmt.Fprintf(os.Stderr, "[DEBUG] TUI fallback: stdin_tty=%v stdout_tty=%v footer=%v\n",
+ stdinTTY, stdoutTTY, opts.ShowFooter)
+ }
+ return tailFollow(ctx, opts.LogPath)
+ }
+
+ // 2. Terminal size check
+ rows, cols, err := term.GetSize(stdout)
+ if err != nil || rows < 5 || cols < 20 {
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot detect terminal size: %v; showing plain logs.\n", err)
+ } else {
+ fmt.Fprintf(os.Stderr, "Terminal too small for TUI (rows=%d cols=%d, need 5x20+); showing plain logs.\n", rows, cols)
+ }
+ return tailFollow(ctx, opts.LogPath)
+ }
+
+ if debug {
+ fmt.Fprintf(os.Stderr, "[DEBUG] TUI mode activating: terminal=%dx%d\n", cols, rows)
+ }
+
+ // 3. Enter raw mode
+ oldState, err := term.MakeRaw(stdin)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Cannot enable TUI mode; showing plain logs.")
+ return tailFollow(ctx, opts.LogPath)
+ }
+
+ // 4. CRITICAL: Always restore terminal on ALL exit paths
+ defer func() {
+ term.Restore(stdin, oldState) // restore cooked mode
+ fmt.Fprint(os.Stdout, "\x1b[?7h") // re-enable line wrap
+ }()
+
+ // 5. Setup: disable line wrap only (no scroll region for now)
+ fmt.Fprint(os.Stdout, "\x1b[?7l") // disable line wrap
+
+ // 6. Show startup message (use \r\n in raw mode)
+ fmt.Fprint(os.Stdout, "\r\n")
+ fmt.Fprint(os.Stdout, "TUI mode active - Press Ctrl+C to STOP NODE | Press 'b' to detach\r\n")
+ fmt.Fprint(os.Stdout, strings.Repeat("-", min(cols, 80))+"\r\n")
+ fmt.Fprint(os.Stdout, "\r\n")
+
+ // 8. Context with cancel for signals
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // 9. Setup signal handling (SIGTERM, SIGHUP only)
+ // SIGINT is handled via raw stdin, SIGWINCH removed (no footer to update)
+ sigCh := make(chan os.Signal, 1)
+ signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGHUP)
+ defer signal.Stop(sigCh)
+
+ go func() {
+ for sig := range sigCh {
+ switch sig {
+ case syscall.SIGTERM, syscall.SIGHUP:
+ cancel() // graceful exit
+ }
+ }
+ }()
+
+ // 10. Start log streaming in goroutine
+ logErr := make(chan error, 1)
+ go func() {
+ logErr <- streamLogs(ctx, opts.LogPath, os.Stdout)
+ }()
+
+ // 11. Start keyboard listener with debouncing
+ keyCh := listenKeys(ctx)
+
+ // 12. Main loop: handle keys and check for errors
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+
+ case err := <-logErr:
+ if err != nil && err != context.Canceled {
+ fmt.Fprintf(os.Stdout, "\r\nLog streaming error: %v\r\n", err)
+ time.Sleep(1 * time.Second)
+ }
+ return err
+
+ case key := <-keyCh:
+ switch key {
+ case 3: // Ctrl+C - STOP NODE
+ fmt.Fprint(os.Stdout, "\r\nStopping nodeโฆ ")
+ _ = stopNode(ctx, os.Stdout, opts.NoColor)
+ return nil
+
+ case opts.BgKey, byte(unicode.ToUpper(rune(opts.BgKey))): // 'b' or 'B' - DETACH VIEWER
+ fmt.Fprint(os.Stdout, "\r\nDetaching to backgroundโฆ\r\n")
+ return nil
+ }
+ }
+ }
+}
+
// stopNode invokes "<this binary> stop" to shut the node down, discarding the
// child's output and reporting "done"/"failed" on w.
func stopNode(ctx context.Context, w io.Writer, noColor bool) error {
	bin, err := os.Executable()
	if err != nil {
		bin = "push-validator" // fallback to PATH
	}

	runCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	cmd := exec.CommandContext(runCtx, bin, "stop")
	cmd.Stdout, cmd.Stderr = io.Discard, io.Discard

	if runErr := cmd.Run(); runErr != nil {
		fmt.Fprintf(w, "failed (%v)\n", runErr)
		return runErr
	}
	fmt.Fprint(w, "done\n")
	return nil
}
+
+// renderFooter draws the 3-line footer at the bottom of the screen
+func renderFooter(w io.Writer, rows, cols int, noColor bool) {
+ if cols < 1 {
+ cols = 1
+ }
+
+ // Never print more characters than we have columns
+ divLen := cols
+ if divLen > 80 {
+ divLen = 80
+ }
+
+ div := strings.Repeat("โ", divLen)
+ if noColor {
+ div = strings.Repeat("-", divLen)
+ } else {
+ div = "\x1b[2m" + div + "\x1b[0m" // dim gray
+ }
+
+ controls := "Press Ctrl+C to STOP NODE | Press 'b' to run in background"
+ if len(controls) > cols {
+ // Truncate safely to terminal width
+ controls = controls[:cols]
+ }
+
+ // Clear both footer lines to avoid stale characters when resizing smaller
+ fmt.Fprintf(w, "\x1b[%d;1H\x1b[2K%s", rows-2, div) // divider
+ fmt.Fprintf(w, "\x1b[%d;1H\x1b[2K%s", rows-1, controls) // controls
+ fmt.Fprintf(w, "\x1b[%d;1H", rows) // spacer
+}
+
// listenKeys reads single bytes from stdin (expected to be in raw mode) and
// forwards them on the returned channel. Ctrl+C (0x03) is forwarded
// immediately; all other keys are debounced to one per 150ms. The goroutine
// exits on read error or n==0 (e.g. stdin closed) and closes the channel.
// NOTE(review): the blocking os.Stdin.Read means ctx cancellation alone does
// not unblock the goroutine until the next keypress or stdin close.
func listenKeys(ctx context.Context) <-chan byte {
	keyCh := make(chan byte, 16)
	go func() {
		defer close(keyCh)
		buf := make([]byte, 1)
		// Debounce reference; initialized to "now", so a key arriving within
		// the first 150ms is intentionally ignored as likely terminal noise.
		lastKey := time.Now()

		for {
			// Non-blocking cancellation check between reads.
			select {
			case <-ctx.Done():
				return
			default:
			}

			n, err := os.Stdin.Read(buf)
			if err != nil || n == 0 {
				return
			}

			// No debounce for Ctrl+C (immediate stop)
			if buf[0] == 3 {
				keyCh <- buf[0]
				continue
			}

			// Debounce: ignore keys within 150ms of last key
			if time.Since(lastKey) < 150*time.Millisecond {
				continue
			}
			lastKey = time.Now()

			keyCh <- buf[0]
		}
	}()
	return keyCh
}
+
// streamLogs follows the log file with rotation support using
// github.com/nxadm/tail, writing every line to out terminated with \r\n
// (required while the terminal is in raw mode). It returns nil on ctx
// cancellation or when the tail channel closes, and the line error otherwise.
func streamLogs(ctx context.Context, logPath string, out io.Writer) error {
	// Wait for log file creation (up to 5 seconds) so a viewer started right
	// after the node doesn't race the node's first write.
	for i := 0; i < 50; i++ {
		if _, err := os.Stat(logPath); err == nil {
			break
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}

	t, err := tail.TailFile(logPath, tail.Config{
		Follow:    true,  // keep following
		ReOpen:    true,  // handle rotation
		MustExist: false, // don't error if file doesn't exist yet
		Poll:      false, // use inotify/kqueue (efficient)
	})
	if err != nil {
		return fmt.Errorf("failed to tail log: %w", err)
	}
	defer t.Cleanup()

	for {
		select {
		case <-ctx.Done():
			return nil
		case line := <-t.Lines:
			if line == nil {
				// Lines channel closed: the tailer has finished.
				return nil
			}
			if line.Err != nil {
				return line.Err
			}
			// Use \r\n in raw mode for proper line breaks
			fmt.Fprintf(out, "%s\r\n", line.Text)
		}
	}
}
+
// tailFollow is a simple fallback for non-TTY environments.
// It shells out to `tail -F` (follows across rotation); if that fails on a
// platform whose tail lacks -F, it retries once with plain `tail -f`.
func tailFollow(ctx context.Context, logPath string) error {
	cmd := exec.CommandContext(ctx, "tail", "-F", logPath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		// If the context ended, the first tail was killed on purpose: don't
		// misread that as "-F unsupported" and spawn a second tail process.
		if ctx.Err() != nil {
			return ctx.Err()
		}
		// Fallback to -f for minimal systems.
		cmd = exec.CommandContext(ctx, "tail", "-f", logPath)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		return cmd.Run()
	}
	return nil
}
+
// min returns the smaller of two integers.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
diff --git a/push-validator-manager/internal/ui/logtui_v2.go b/push-validator-manager/internal/ui/logtui_v2.go
new file mode 100644
index 00000000..0a81a7b7
--- /dev/null
+++ b/push-validator-manager/internal/ui/logtui_v2.go
@@ -0,0 +1,231 @@
+package ui
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+ "unicode"
+
+ "golang.org/x/term"
+)
+
+// RunLogUIV2 shows logs with sticky footer at bottom
+func RunLogUIV2(ctx context.Context, opts LogUIOptions) error {
+ // 1. Check TTY
+ stdin := int(os.Stdin.Fd())
+ stdout := int(os.Stdout.Fd())
+ if !term.IsTerminal(stdin) || !term.IsTerminal(stdout) || !opts.ShowFooter {
+ return tailFollowSimple(ctx, opts.LogPath)
+ }
+
+ // 2. Get terminal size (need width for divider, height for footer placement)
+ rows, cols, err := term.GetSize(stdout)
+ if err != nil {
+ return tailFollowSimple(ctx, opts.LogPath)
+ }
+
+ // 3. Enter raw mode for key handling
+ oldState, err := term.MakeRaw(stdin)
+ if err != nil {
+ return tailFollowSimple(ctx, opts.LogPath)
+ }
+ defer term.Restore(stdin, oldState)
+
+ // 4. Allow terminal state to stabilize after entering raw mode
+ time.Sleep(10 * time.Millisecond)
+
+ // 4. Print minimal controls banner (keeps existing scrollback intact)
+ if opts.BgKey == 0 {
+ opts.BgKey = 'b'
+ }
+ bgLabel := fmt.Sprintf("%c", opts.BgKey)
+ footerRaw := fmt.Sprintf("Controls: Ctrl+C to stop node | '%s' to run in background", bgLabel)
+ if cols > 0 && len(footerRaw) > cols {
+ footerRaw = footerRaw[:cols]
+ }
+ footerStyled := footerRaw
+ if !opts.NoColor {
+ footerStyled = "\x1b[1m" + footerRaw + "\x1b[0m"
+ }
+
+ var renderFooter func()
+ if rows > 2 {
+ renderFooter = func() {
+ fmt.Fprint(os.Stdout, "\x1b7")
+ if rows > 1 {
+ fmt.Fprintf(os.Stdout, "\x1b[%d;1H\x1b[2K", rows-1)
+ }
+ fmt.Fprintf(os.Stdout, "\x1b[%d;1H\x1b[2K%s", rows, footerStyled)
+ fmt.Fprint(os.Stdout, "\x1b8")
+ }
+ renderFooter()
+ } else {
+ renderFooter = func() {}
+ }
+ defer renderFooter()
+
+ // 8. Start log tailing in goroutine
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // 9. Start log streaming
+ logDone := make(chan error, 1)
+ go func() {
+ logDone <- streamLogsSimple(ctx, opts.LogPath, renderFooter)
+ }()
+
+ // 10. Listen for keypresses
+ keyDone := make(chan byte, 1)
+ go func() {
+ buf := make([]byte, 1)
+ for {
+ n, err := os.Stdin.Read(buf)
+ if err != nil || n == 0 {
+ return
+ }
+ keyDone <- buf[0]
+ }
+ }()
+
+ // 10. Wait for key or log error
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case err := <-logDone:
+ return err
+ case key := <-keyDone:
+ upperBg := byte(unicode.ToUpper(rune(opts.BgKey)))
+ switch key {
+ case 3: // Ctrl+C
+ fmt.Fprint(os.Stdout, "\r\nStopping node...\r\n")
+ stopNodeSimple()
+ return nil
+ case opts.BgKey, upperBg:
+ return nil
+ }
+ }
+ }
+}
+
+func streamLogsSimple(ctx context.Context, logPath string, onPrint func()) error {
+ // Wait for file
+ for i := 0; i < 50; i++ {
+ if _, err := os.Stat(logPath); err == nil {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(100 * time.Millisecond):
+ }
+ }
+
+ // Open and tail file
+ f, err := os.Open(logPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ const backlogLines = 20
+ // Emit recent history so the viewer isn't blank on start
+ if err := printRecentLines(f, os.Stdout, backlogLines, onPrint); err != nil {
+ return err
+ }
+
+ // Seek to end and continue streaming
+ if _, err := f.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+
+ reader := bufio.NewReader(f)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+
+ line, err := reader.ReadString('\n')
+ if err == io.EOF {
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ if err != nil {
+ return err
+ }
+
+ // Print with \r\n for raw mode
+ fmt.Fprint(os.Stdout, strings.TrimSuffix(line, "\n")+"\r\n")
+ if onPrint != nil {
+ onPrint()
+ }
+ }
+}
+
+func printRecentLines(f *os.File, out io.Writer, maxLines int, onPrint func()) error {
+ if maxLines <= 0 {
+ return nil
+ }
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ scanner := bufio.NewScanner(f)
+ buf := make([]string, 0, maxLines)
+ // allow long log lines up to 512 KiB
+ bufSize := 512 * 1024
+ scanner.Buffer(make([]byte, bufSize), bufSize)
+ for scanner.Scan() {
+ if len(buf) == maxLines {
+ copy(buf, buf[1:])
+ buf[len(buf)-1] = scanner.Text()
+ } else {
+ buf = append(buf, scanner.Text())
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+ for _, line := range buf {
+ fmt.Fprintf(out, "%s\r\n", line)
+ if onPrint != nil {
+ onPrint()
+ }
+ }
+ return nil
+}
+
// stopNodeSimple invokes `<this binary> stop` (falling back to the
// `push-validator` name on PATH) to shut the node down. All errors are
// intentionally ignored: this is a best-effort action taken while the
// log viewer is tearing down.
func stopNodeSimple() {
	exe, _ := os.Executable() // error ignored; empty exe triggers the fallback below
	if exe == "" {
		exe = "push-validator"
	}
	cmd := exec.Command(exe, "stop")
	cmd.Run() // best-effort; output and exit status are deliberately discarded
}
+
+func tailFollowSimple(ctx context.Context, logPath string) error {
+ cmd := exec.CommandContext(ctx, "tail", "-F", logPath)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ cmd = exec.CommandContext(ctx, "tail", "-f", logPath)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+ }
+ return nil
+}
+
// minInt reports the smaller of a and b.
func minInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
diff --git a/push-validator-manager/internal/ui/printer.go b/push-validator-manager/internal/ui/printer.go
new file mode 100644
index 00000000..bbd0fc97
--- /dev/null
+++ b/push-validator-manager/internal/ui/printer.go
@@ -0,0 +1,139 @@
+package ui
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// Printer centralizes output formatting for commands.
+// - Respects --output (text|json)
+// - Uses ColorConfig for styling when printing text
+// - Provides helpers for common message types
+type Printer struct{
+ format string
+ Colors *ColorConfig
+}
+
+func NewPrinter(format string) Printer {
+ return Printer{format: format, Colors: NewColorConfig()}
+}
+
// Textf prints formatted text to stdout (always text path); a thin wrapper
// over fmt.Printf so call sites route all output through the Printer.
func (p Printer) Textf(format string, a ...any) { fmt.Printf(format, a...) }

// JSON pretty-prints a JSON value to stdout.
// The Encode error is deliberately discarded: display helpers never fail the
// command over an unencodable value.
func (p Printer) JSON(v any) {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", " ")
	_ = enc.Encode(v)
}
+
// Success prints a success line with themed prefix.
func (p Printer) Success(msg string) {
	c := p.Colors
	// Don't add extra space if message already starts with whitespace
	// (callers sometimes pre-indent their message).
	space := " "
	if len(msg) > 0 && (msg[0] == ' ' || msg[0] == '\t') {
		space = ""
	}
	if c.EmojiEnabled {
		fmt.Printf("%s%s%s\n", c.Success("โ"), space, msg)
	} else {
		fmt.Printf("%s%s%s\n", c.Success("[OK]"), space, msg)
	}
}

// Info prints an informational line.
// NOTE(review): unlike Success, the Println-based helpers below always insert
// a single space between prefix and msg, even for pre-indented messages.
func (p Printer) Info(msg string) {
	c := p.Colors
	if c.EmojiEnabled {
		fmt.Println(c.Info("โน"), msg)
	} else {
		fmt.Println(c.Info("[INFO]"), msg)
	}
}

// Warn prints a warning line.
func (p Printer) Warn(msg string) {
	c := p.Colors
	if c.EmojiEnabled {
		fmt.Println(c.Warning("!"), msg)
	} else {
		fmt.Println(c.Warning("[WARN]"), msg)
	}
}

// Error prints an error line.
func (p Printer) Error(msg string) {
	c := p.Colors
	if c.EmojiEnabled {
		fmt.Println(c.Error("โ"), msg)
	} else {
		fmt.Println(c.Error("[ERR]"), msg)
	}
}
+
// Header prints a section header, padded with one space on each side so a
// themed background (if any) extends past the text.
func (p Printer) Header(title string) {
	fmt.Println(p.Colors.Header(" " + title + " "))
}

// Separator prints a themed separator line of n characters.
func (p Printer) Separator(n int) { fmt.Println(p.Colors.Separator(n)) }

// Section prints a section header with separator:
// a blank line, the sub-header, then a fixed 40-character rule.
func (p Printer) Section(title string) {
	fmt.Println()
	fmt.Println(p.Colors.SubHeader(title))
	fmt.Println(p.Colors.Separator(40))
}
+
// MnemonicBox prints a mnemonic phrase with a bold, underlined title and
// clean formatting: the phrase is wrapped at 8 words per line (3 lines for a
// standard 24-word phrase), all rendered in the theme's success color.
func (p Printer) MnemonicBox(mnemonic string) {
	fmt.Println()

	// Bold + Underlined title in green (raw SGR codes prepended to the theme color).
	title := "Recovery Mnemonic Phrase"
	boldUnderlineGreen := "\033[1m\033[4m" + p.Colors.Theme.Success
	fmt.Println(p.Colors.Apply(boldUnderlineGreen, title))

	// Separator line sized to the title width.
	fmt.Println(p.Colors.Separator(len(title)))
	fmt.Println()

	// Split mnemonic into lines of 8 words (3 lines for a 24-word phrase).
	words := strings.Fields(mnemonic)
	wordsPerLine := 8

	for i := 0; i < len(words); i += wordsPerLine {
		end := i + wordsPerLine
		if end > len(words) {
			end = len(words) // final line may be shorter
		}
		line := strings.Join(words[i:end], " ")
		fmt.Println(p.Colors.Apply(p.Colors.Theme.Success, line))
	}

	fmt.Println()
}
+
// KeyValueLine prints a "Key: value" pair with the value rendered in the
// requested theme color. colorType is one of "blue", "yellow", "green",
// "dim"; anything else falls back to the default value style.
func (p Printer) KeyValueLine(key, value, colorType string) {
	var coloredValue string
	switch colorType {
	case "blue":
		coloredValue = p.Colors.Apply(p.Colors.Theme.Info, value)
	case "yellow":
		coloredValue = p.Colors.Apply(p.Colors.Theme.Warning, value)
	case "green":
		coloredValue = p.Colors.Apply(p.Colors.Theme.Success, value)
	case "dim":
		coloredValue = p.Colors.Apply(p.Colors.Theme.Description, value)
	default:
		coloredValue = p.Colors.Value(value)
	}
	fmt.Printf("%s %s\n", p.Colors.Label(key+":"), coloredValue)
}
+
diff --git a/push-validator-manager/internal/ui/progress.go b/push-validator-manager/internal/ui/progress.go
new file mode 100644
index 00000000..60ddd533
--- /dev/null
+++ b/push-validator-manager/internal/ui/progress.go
@@ -0,0 +1,41 @@
+package ui
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
// Spinner is a tiny terminal spinner helper.
// The caller drives animation timing (see Tick); delay is only a suggestion.
type Spinner struct {
	frames []rune        // animation glyphs, cycled by Tick
	idx    int           // next frame index (monotonically increasing)
	out    io.Writer     // destination; io.Discard when constructed with nil
	colors *ColorConfig  // theme; when colors are disabled, no glyph is drawn
	prefix string        // message printed before the glyph
	delay  time.Duration // suggested per-frame delay for the caller's ticker
}
+
+func NewSpinner(out io.Writer, prefix string) *Spinner {
+ if out == nil { out = io.Discard }
+ return &Spinner{
+ frames: []rune{'โ ','โ ','โ น','โ ธ','โ ผ','โ ด','โ ฆ','โ ง','โ ','โ '},
+ idx: 0,
+ out: out,
+ colors: NewColorConfig(),
+ prefix: prefix,
+ delay: 120 * time.Millisecond,
+ }
+}
+
+func (s *Spinner) SetDelay(d time.Duration) { if d > 0 { s.delay = d } }
+
+// Tick renders the next frame with prefix. Caller controls timing via time.Ticker.
+func (s *Spinner) Tick() {
+ if s.out == nil { return }
+ frame := s.frames[s.idx%len(s.frames)]
+ s.idx++
+ msg := s.prefix
+ if s.colors.Enabled { fmt.Fprintf(s.out, "\r%s %c", msg, frame) } else { fmt.Fprintf(s.out, "\r%s", msg) }
+}
+
diff --git a/push-validator-manager/internal/ui/tables.go b/push-validator-manager/internal/ui/tables.go
new file mode 100644
index 00000000..eafeff20
--- /dev/null
+++ b/push-validator-manager/internal/ui/tables.go
@@ -0,0 +1,71 @@
+package ui
+
+import (
+ "regexp"
+ "strings"
+)
+
+// Table renders a simple monospaced table with optional colorization using ColorConfig.
+// widths optionally fixes column widths; when 0, width is computed from data (capped at maxWidth per col).
+func Table(c *ColorConfig, headers []string, rows [][]string, widths []int) string {
+ const maxWidth = 80
+ // compute widths
+ w := make([]int, len(headers))
+ for i := range headers {
+ w[i] = len(headers[i])
+ }
+ for _, r := range rows {
+ for i := range r {
+ if i >= len(w) { continue }
+ if l := len(r[i]); l > w[i] {
+ if l > maxWidth { l = maxWidth }
+ w[i] = l
+ }
+ }
+ }
+ if len(widths) == len(w) {
+ for i := range w {
+ if widths[i] > 0 { w[i] = widths[i] }
+ }
+ }
+ // header line
+ var b strings.Builder
+ // top title separator not included; caller can add
+ // headers
+ for i, h := range headers {
+ if i > 0 { b.WriteString(" ") }
+ b.WriteString(padCell(c.Label(h), w[i]))
+ }
+ b.WriteString("\n")
+ // separator
+ sepLen := 0
+ for i := range w { sepLen += w[i]; if i < len(w)-1 { sepLen++ } }
+ b.WriteString(strings.Repeat("-", sepLen))
+ b.WriteString("\n")
+ // rows
+ for _, r := range rows {
+ for i := range w {
+ if i > 0 { b.WriteString(" ") }
+ cell := ""
+ if i < len(r) { cell = r[i] }
+ if len(cell) > maxWidth { cell = cell[:maxWidth-1] + "โฆ" }
+ b.WriteString(padCell(c.Value(cell), w[i]))
+ }
+ b.WriteString("\n")
+ }
+ return b.String()
+}
+
// ansiRE matches SGR color/style escape sequences so they can be ignored when
// measuring display width.
var ansiRE = regexp.MustCompile(`\x1b\[[0-9;]*m`)

// visibleLen returns the number of on-screen runes in s, ignoring ANSI
// styling escapes.
func visibleLen(s string) int {
	plain := ansiRE.ReplaceAllString(s, "")
	n := 0
	for range plain {
		n++
	}
	return n
}

// padCell right-pads s with spaces up to the given visible width; strings
// already at least that wide are returned unchanged.
func padCell(s string, width int) string {
	if pad := width - visibleLen(s); pad > 0 {
		return s + strings.Repeat(" ", pad)
	}
	return s
}
diff --git a/push-validator-manager/internal/validator/fetcher.go b/push-validator-manager/internal/validator/fetcher.go
new file mode 100644
index 00000000..43fa2216
--- /dev/null
+++ b/push-validator-manager/internal/validator/fetcher.go
@@ -0,0 +1,718 @@
+package validator
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pushchain/push-chain-node/push-validator-manager/internal/config"
+)
+
+
+
// rewardsCacheEntry holds cached rewards data with timestamp.
type rewardsCacheEntry struct {
	commission  string    // formatted commission rewards (or "โ" placeholder)
	outstanding string    // formatted outstanding rewards (or "โ" placeholder)
	fetchedAt   time.Time // when the entry was fetched, for TTL expiry checks
}

// Fetcher handles validator data fetching with caching.
// All fields are guarded by mu, so a single Fetcher is safe for concurrent use.
type Fetcher struct {
	mu sync.Mutex

	// All validators cache
	allValidators     ValidatorList
	allValidatorsTime time.Time // zero until the first fetch

	// My validator cache
	myValidator     MyValidatorInfo
	myValidatorTime time.Time // zero until the first fetch

	// Rewards cache (per validator address)
	rewardsCache map[string]rewardsCacheEntry
	rewardsTTL   time.Duration

	// cacheTTL applies to both the all-validators and my-validator caches.
	cacheTTL time.Duration
}
+
+// NewFetcher creates a new validator fetcher with 30s cache
+func NewFetcher() *Fetcher {
+ return &Fetcher{
+ cacheTTL: 30 * time.Second,
+ rewardsTTL: 30 * time.Second,
+ rewardsCache: make(map[string]rewardsCacheEntry),
+ }
+}
+
+// GetAllValidators fetches all validators with 30s caching
+func (f *Fetcher) GetAllValidators(ctx context.Context, cfg config.Config) (ValidatorList, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Force fetch on first call (cache is zero-initialized)
+ if f.allValidatorsTime.IsZero() {
+ list, err := f.fetchAllValidators(ctx, cfg)
+ if err != nil {
+ return ValidatorList{}, err
+ }
+ f.allValidators = list
+ f.allValidatorsTime = time.Now()
+ return list, nil
+ }
+
+ // Return cached if still valid
+ if time.Since(f.allValidatorsTime) < f.cacheTTL && f.allValidators.Total > 0 {
+ return f.allValidators, nil
+ }
+
+ // Fetch fresh data
+ list, err := f.fetchAllValidators(ctx, cfg)
+ if err != nil {
+ // Return stale cache if available
+ if f.allValidators.Total > 0 {
+ return f.allValidators, nil
+ }
+ return ValidatorList{}, err
+ }
+
+ // Update cache
+ f.allValidators = list
+ f.allValidatorsTime = time.Now()
+ return list, nil
+}
+
+// GetMyValidator fetches current node's validator status with 30s caching
+func (f *Fetcher) GetMyValidator(ctx context.Context, cfg config.Config) (MyValidatorInfo, error) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Force fetch on first call (cache is zero-initialized)
+ if f.myValidatorTime.IsZero() {
+ myVal, err := f.fetchMyValidator(ctx, cfg)
+ if err != nil {
+ // IMPORTANT: Set cache time even on error to prevent infinite retry loops
+ f.myValidatorTime = time.Now()
+ return MyValidatorInfo{IsValidator: false}, err
+ }
+ f.myValidator = myVal
+ f.myValidatorTime = time.Now()
+ return myVal, nil
+ }
+
+ // Return cached if still valid
+ if time.Since(f.myValidatorTime) < f.cacheTTL {
+ return f.myValidator, nil
+ }
+
+ // Fetch fresh data
+ myVal, err := f.fetchMyValidator(ctx, cfg)
+ if err != nil {
+ // Return stale cache if available
+ if f.myValidator.Address != "" || !f.myValidatorTime.IsZero() {
+ return f.myValidator, nil
+ }
+ // Set cache time to retry on next refresh
+ f.myValidatorTime = time.Now()
+ return MyValidatorInfo{IsValidator: false}, err
+ }
+
+ // Update cache
+ f.myValidator = myVal
+ f.myValidatorTime = time.Now()
+ return myVal, nil
+}
+
// fetchAllValidators queries all validators from the network via the pchaind
// CLI (`query staking validators` against the genesis node's RPC) and maps
// the JSON output to the package's ValidatorInfo type.
func (f *Fetcher) fetchAllValidators(ctx context.Context, cfg config.Config) (ValidatorList, error) {
	bin, err := exec.LookPath("pchaind")
	if err != nil {
		return ValidatorList{}, fmt.Errorf("pchaind not found: %w", err)
	}

	remote := fmt.Sprintf("tcp://%s:26657", cfg.GenesisDomain)
	cmd := exec.CommandContext(ctx, bin, "query", "staking", "validators", "--node", remote, "-o", "json")
	output, err := cmd.Output()
	if err != nil {
		return ValidatorList{}, fmt.Errorf("query validators failed: %w", err)
	}

	// Minimal projection of the staking query response.
	var result struct {
		Validators []struct {
			Description struct {
				Moniker string `json:"moniker"`
			} `json:"description"`
			OperatorAddress string `json:"operator_address"`
			Status          string `json:"status"`
			Tokens          string `json:"tokens"`
			Commission      struct {
				CommissionRates struct {
					Rate string `json:"rate"`
				} `json:"commission_rates"`
			} `json:"commission"`
			Jailed bool `json:"jailed"`
		} `json:"validators"`
	}

	if err := json.Unmarshal(output, &result); err != nil {
		return ValidatorList{}, fmt.Errorf("parse validators failed: %w", err)
	}

	validators := make([]ValidatorInfo, 0, len(result.Validators))
	for _, v := range result.Validators {
		moniker := v.Description.Moniker
		if moniker == "" {
			moniker = "unknown"
		}

		status := parseStatus(v.Status)

		// Voting power in whole tokens.
		// NOTE(review): assumes 18-decimal base units (tokens / 1e18) — confirm
		// against the chain's staking denom.
		var votingPower int64
		if v.Tokens != "" {
			if tokens, err := strconv.ParseFloat(v.Tokens, 64); err == nil {
				votingPower = int64(tokens / 1e18)
			}
		}

		// Commission rate as a whole percentage; unparsable rates fall back to "0%".
		commission := "0%"
		if v.Commission.CommissionRates.Rate != "" {
			if rate, err := strconv.ParseFloat(v.Commission.CommissionRates.Rate, 64); err == nil {
				commission = fmt.Sprintf("%.0f%%", rate*100)
			}
		}

		validators = append(validators, ValidatorInfo{
			OperatorAddress: v.OperatorAddress,
			Moniker:         moniker,
			Status:          status,
			Tokens:          v.Tokens,
			VotingPower:     votingPower,
			Commission:      commission,
			Jailed:          v.Jailed,
		})
	}

	return ValidatorList{
		Validators: validators,
		Total:      len(validators),
	}, nil
}
+
// fetchMyValidator fetches the current node's validator info by comparing
// consensus pubkeys.
//
// Matching strategy, in order:
//  1. consensus pubkey match        -> IsValidator=true (authoritative match)
//  2. local keyring address match   -> IsValidator=false (key owned, consensus key mismatch)
//  3. moniker match                 -> IsValidator=false (same name, different key/node)
//
// Most "cannot determine" failures return a zero MyValidatorInfo with a nil
// error, because not being a validator is a normal state rather than an error.
func (f *Fetcher) fetchMyValidator(ctx context.Context, cfg config.Config) (MyValidatorInfo, error) {
	bin, err := exec.LookPath("pchaind")
	if err != nil {
		return MyValidatorInfo{IsValidator: false}, nil
	}

	// Get local consensus pubkey using 'tendermint show-validator'
	showValCmd := exec.CommandContext(ctx, bin, "tendermint", "show-validator", "--home", cfg.HomeDir)
	pubkeyBytes, err := showValCmd.Output()
	if err != nil {
		// No validator key file exists
		return MyValidatorInfo{IsValidator: false}, nil
	}

	var localPubkey struct {
		Type string `json:"@type"`
		Key string `json:"key"`
	}
	if err := json.Unmarshal(pubkeyBytes, &localPubkey); err != nil {
		return MyValidatorInfo{IsValidator: false}, nil
	}

	if localPubkey.Key == "" {
		return MyValidatorInfo{IsValidator: false}, nil
	}

	// Build the full pubkey JSON string for slashing info query
	fullPubkeyJSON := string(pubkeyBytes)

	// Get local node moniker from status (for conflict detection). Failures
	// here are non-fatal: localMoniker simply stays empty.
	var localMoniker string
	statusCmd := exec.CommandContext(ctx, bin, "status", "--node", cfg.RPCLocal)
	if statusOutput, err := statusCmd.Output(); err == nil {
		var statusData struct {
			NodeInfo struct {
				Moniker string `json:"moniker"`
			} `json:"NodeInfo"`
		}
		if json.Unmarshal(statusOutput, &statusData) == nil {
			localMoniker = statusData.NodeInfo.Moniker
		}
	}

	// Fetch all validators to match by consensus pubkey
	remote := fmt.Sprintf("tcp://%s:26657", cfg.GenesisDomain)
	queryCmd := exec.CommandContext(ctx, bin, "query", "staking", "validators", "--node", remote, "-o", "json")
	valsOutput, err := queryCmd.Output()
	if err != nil {
		return MyValidatorInfo{IsValidator: false}, err
	}

	var result struct {
		Validators []struct {
			OperatorAddress string `json:"operator_address"`
			Description struct {
				Moniker string `json:"moniker"`
			} `json:"description"`
			ConsensusPubkey struct {
				Value string `json:"value"` // The base64 pubkey
			} `json:"consensus_pubkey"`
			Status string `json:"status"`
			Tokens string `json:"tokens"`
			Commission struct {
				CommissionRates struct {
					Rate string `json:"rate"`
				} `json:"commission_rates"`
			} `json:"commission"`
			Jailed bool `json:"jailed"`
		} `json:"validators"`
	}

	if err := json.Unmarshal(valsOutput, &result); err != nil {
		return MyValidatorInfo{IsValidator: false}, err
	}

	// Calculate total voting power (denominator for VotingPct below).
	// NOTE(review): assumes 18-decimal base units (tokens / 1e18) — confirm.
	var totalVotingPower int64
	for _, v := range result.Validators {
		if v.Tokens != "" {
			if tokens, err := strconv.ParseFloat(v.Tokens, 64); err == nil {
				totalVotingPower += int64(tokens / 1e18)
			}
		}
	}

	// Pass 1: try to find our validator by matching the consensus pubkey.
	// While scanning, also record whether some *other* validator already uses
	// our moniker (used when no match is found at all).
	var monikerConflict string
	for _, v := range result.Validators {
		// Check for moniker conflicts (different validator, same moniker)
		if localMoniker != "" && v.Description.Moniker == localMoniker &&
			!strings.EqualFold(v.ConsensusPubkey.Value, localPubkey.Key) {
			monikerConflict = localMoniker
		}

		// Check if this validator matches our consensus pubkey
		if strings.EqualFold(v.ConsensusPubkey.Value, localPubkey.Key) {
			// Found our validator!
			status := parseStatus(v.Status)

			var votingPower int64
			if v.Tokens != "" {
				if tokens, err := strconv.ParseFloat(v.Tokens, 64); err == nil {
					votingPower = int64(tokens / 1e18)
				}
			}

			var votingPct float64
			if totalVotingPower > 0 {
				votingPct = float64(votingPower) / float64(totalVotingPower)
			}

			commission := "0%"
			if v.Commission.CommissionRates.Rate != "" {
				if rate, err := strconv.ParseFloat(v.Commission.CommissionRates.Rate, 64); err == nil {
					commission = fmt.Sprintf("%.0f%%", rate*100)
				}
			}

			info := MyValidatorInfo{
				IsValidator: true,
				Address:     v.OperatorAddress,
				Moniker:     v.Description.Moniker,
				Status:      status,
				VotingPower: votingPower,
				VotingPct:   votingPct,
				Commission:  commission,
				Jailed:      v.Jailed,
				ValidatorExistsWithSameMoniker: monikerConflict != "",
				ConflictingMoniker:             monikerConflict,
			}

			// If jailed, fetch slashing info with its own 3s timeout so a slow
			// slashing query cannot stall the whole status fetch; errors are
			// non-fatal (SlashingInfo simply stays unset).
			if v.Jailed {
				slashCtx, slashCancel := context.WithTimeout(context.Background(), 3*time.Second)
				slashingInfo, err := GetSlashingInfo(slashCtx, cfg, fullPubkeyJSON)
				slashCancel()
				if err == nil {
					info.SlashingInfo = slashingInfo
				}
			}

			return info, nil
		}
	}

	// Pass 2: not matched by consensus pubkey — check for a keyring address
	// match (the validator may have been created with a key in the local
	// keyring but a different consensus key).
	keyringAddrs := getKeyringAddresses(bin, cfg)
	for _, keyAddr := range keyringAddrs {
		for _, v := range result.Validators {
			// Check if validator's operator address matches a key in the keyring.
			// Convert cosmos address to validator operator address for comparison:
			// both addresses encode the same bech32 data, just different prefixes.
			// NOTE(review): prefix stripping compares the bech32 payload
			// including its checksum, which differs across prefixes — confirm
			// this comparison actually matches in practice.
			cosmosPrefix := "push1"
			validatorPrefix := "pushvaloper1"

			// Extract the bech32-encoded part (remove prefix)
			keyAddrData := strings.TrimPrefix(keyAddr, cosmosPrefix)
			valAddrData := strings.TrimPrefix(v.OperatorAddress, validatorPrefix)

			if keyAddrData != "" && keyAddrData == valAddrData {
				// Found validator controlled by a key in our keyring
				status := parseStatus(v.Status)

				var votingPower int64
				if v.Tokens != "" {
					if tokens, err := strconv.ParseFloat(v.Tokens, 64); err == nil {
						votingPower = int64(tokens / 1e18)
					}
				}

				var votingPct float64
				if totalVotingPower > 0 {
					votingPct = float64(votingPower) / float64(totalVotingPower)
				}

				commission := "0%"
				if v.Commission.CommissionRates.Rate != "" {
					if rate, err := strconv.ParseFloat(v.Commission.CommissionRates.Rate, 64); err == nil {
						commission = fmt.Sprintf("%.0f%%", rate*100)
					}
				}

				// Return validator info but with IsValidator=false (no consensus
				// pubkey match). This signals "keyring match but consensus key
				// mismatch" to the caller.
				return MyValidatorInfo{
					IsValidator: false,
					Address:     v.OperatorAddress,
					Moniker:     v.Description.Moniker,
					Status:      status,
					VotingPower: votingPower,
					VotingPct:   votingPct,
					Commission:  commission,
					Jailed:      v.Jailed,
					ValidatorExistsWithSameMoniker: false,
					ConflictingMoniker:             "",
				}, nil
			}
		}
	}

	// Pass 3: not matched by consensus pubkey or keyring — check for a
	// moniker-based match (validator may have been created with a different
	// key/node entirely).
	if localMoniker != "" {
		for _, v := range result.Validators {
			if v.Description.Moniker == localMoniker {
				// Found validator by moniker but consensus pubkey doesn't match
				status := parseStatus(v.Status)

				var votingPower int64
				if v.Tokens != "" {
					if tokens, err := strconv.ParseFloat(v.Tokens, 64); err == nil {
						votingPower = int64(tokens / 1e18)
					}
				}

				var votingPct float64
				if totalVotingPower > 0 {
					votingPct = float64(votingPower) / float64(totalVotingPower)
				}

				commission := "0%"
				if v.Commission.CommissionRates.Rate != "" {
					if rate, err := strconv.ParseFloat(v.Commission.CommissionRates.Rate, 64); err == nil {
						commission = fmt.Sprintf("%.0f%%", rate*100)
					}
				}

				// Return validator info but with IsValidator=false (no consensus
				// pubkey match). This indicates moniker match but key/node mismatch.
				return MyValidatorInfo{
					IsValidator: false,
					Address:     v.OperatorAddress,
					Moniker:     v.Description.Moniker,
					Status:      status,
					VotingPower: votingPower,
					VotingPct:   votingPct,
					Commission:  commission,
					Jailed:      v.Jailed,
					ValidatorExistsWithSameMoniker: false,
					ConflictingMoniker:             "",
				}, nil
			}
		}
	}

	// Not registered as validator at all; still report any moniker conflict
	// detected in pass 1 so the caller can warn before registration.
	return MyValidatorInfo{
		IsValidator: false,
		ValidatorExistsWithSameMoniker: monikerConflict != "",
		ConflictingMoniker:             monikerConflict,
	}, nil
}
+
// parseStatus converts a bond-status enum name to its short human-readable
// form. Unrecognized values are passed through unchanged.
func parseStatus(status string) string {
	short, ok := map[string]string{
		"BOND_STATUS_BONDED":    "BONDED",
		"BOND_STATUS_UNBONDING": "UNBONDING",
		"BOND_STATUS_UNBONDED":  "UNBONDED",
	}[status]
	if !ok {
		return status
	}
	return short
}
+
// GetValidatorRewards fetches commission and outstanding rewards for a
// validator via the pchaind CLI.
//
// Both values are formatted as whole-token decimal strings; "โ" is the
// placeholder for anything that could not be fetched or parsed. A nil error
// may therefore be returned even when both values are placeholders — callers
// treat missing rewards as a display concern, not a failure.
//
// NOTE(review): the incoming ctx is intentionally NOT used for the queries
// (see the Background timeout below); confirm callers don't rely on
// cancellation propagating here.
func GetValidatorRewards(ctx context.Context, cfg config.Config, validatorAddr string) (commission string, outstanding string, err error) {
	if validatorAddr == "" {
		return "โ", "โ", fmt.Errorf("validator address required")
	}

	bin, err := exec.LookPath("pchaind")
	if err != nil {
		return "โ", "โ", fmt.Errorf("pchaind not found: %w", err)
	}

	// Create child context with 15s timeout per validator to avoid deadline issues
	// when fetching rewards for multiple validators in parallel
	// Increased from 5s to handle network latency and slower nodes
	queryCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	remote := fmt.Sprintf("tcp://%s:26657", cfg.GenesisDomain)

	// Fetch commission rewards; any failure leaves the "โ" placeholder.
	commissionRewards := "โ"
	commCmd := exec.CommandContext(queryCtx, bin, "query", "distribution", "commission", validatorAddr, "--node", remote, "-o", "json")
	if commOutput, err := commCmd.Output(); err == nil {
		// NOTE(review): assumes the commission amounts arrive as plain
		// strings like "123.45upc" — confirm against the CLI's JSON schema.
		var commResult struct {
			Commission struct {
				Commission []string `json:"commission"`
			} `json:"commission"`
		}
		if err := json.Unmarshal(commOutput, &commResult); err == nil && len(commResult.Commission.Commission) > 0 {
			// Extract numeric part from amount string (format: "123.45upc")
			amountStr := commResult.Commission.Commission[0]
			// Remove denom suffix
			amountStr = strings.TrimSuffix(amountStr, "upc")
			if amount, err := strconv.ParseFloat(amountStr, 64); err == nil {
				// Convert 18-decimal base units to whole tokens.
				commissionRewards = fmt.Sprintf("%.2f", amount/1e18)
			}
		}
	}

	// Fetch outstanding rewards with retry logic (transient RPC failures are
	// common against remote nodes).
	outstandingRewards := "โ"
	var outOutput []byte
	var outErr error

	// Retry up to 2 times with 2s delay on failure
	maxRetries := 2
	for attempt := 0; attempt <= maxRetries; attempt++ {
		outCmd := exec.CommandContext(queryCtx, bin, "query", "distribution", "validator-outstanding-rewards", validatorAddr, "--node", remote, "-o", "json")
		outOutput, outErr = outCmd.Output()

		if outErr == nil {
			break // Success, exit retry loop
		}

		// Wait before retry (except on last attempt)
		if attempt < maxRetries {
			time.Sleep(2 * time.Second)
		}
	}

	// Parse outstanding rewards if fetch succeeded
	if outErr == nil {
		var outResult struct {
			Rewards struct {
				Rewards []string `json:"rewards"`
			} `json:"rewards"`
		}
		if err := json.Unmarshal(outOutput, &outResult); err == nil && len(outResult.Rewards.Rewards) > 0 {
			// Extract numeric part from amount string (format: "123.45upc")
			amountStr := outResult.Rewards.Rewards[0]
			// Remove denom suffix
			amountStr = strings.TrimSuffix(amountStr, "upc")
			if amount, err := strconv.ParseFloat(amountStr, 64); err == nil {
				outstandingRewards = fmt.Sprintf("%.2f", amount/1e18)
			}
		}
	}

	return commissionRewards, outstandingRewards, nil
}
+
// GetCachedValidatorRewards fetches validator rewards with 30s caching,
// keyed by validator address. Failed fetches are not cached, so the next
// call retries immediately.
//
// NOTE(review): f.mu is held across the network fetch (up to ~15s plus
// retries inside GetValidatorRewards), which blocks every other Fetcher
// method for that duration — confirm this is acceptable for the dashboard's
// refresh loop.
func (f *Fetcher) GetCachedValidatorRewards(ctx context.Context, cfg config.Config, validatorAddr string) (commission string, outstanding string, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Check cache first
	if cached, exists := f.rewardsCache[validatorAddr]; exists {
		if time.Since(cached.fetchedAt) < f.rewardsTTL {
			return cached.commission, cached.outstanding, nil
		}
	}

	// Cache miss or expired - fetch fresh data
	commission, outstanding, err = GetValidatorRewards(ctx, cfg, validatorAddr)
	if err == nil {
		// Store in cache
		f.rewardsCache[validatorAddr] = rewardsCacheEntry{
			commission:  commission,
			outstanding: outstanding,
			fetchedAt:   time.Now(),
		}
	}

	return commission, outstanding, err
}
+
// globalFetcher is the package-level Fetcher shared by the convenience
// wrappers below.
// NOTE(review): package-level mutable state; fine for a single-process CLI
// but worth threading through explicitly if this package grows.
var globalFetcher = NewFetcher()

// GetCachedValidatorsList returns the network validator set via the shared
// fetcher's 30s cache.
func GetCachedValidatorsList(ctx context.Context, cfg config.Config) (ValidatorList, error) {
	return globalFetcher.GetAllValidators(ctx, cfg)
}

// GetCachedMyValidator returns this node's validator info via the shared
// fetcher's 30s cache.
func GetCachedMyValidator(ctx context.Context, cfg config.Config) (MyValidatorInfo, error) {
	return globalFetcher.GetMyValidator(ctx, cfg)
}

// GetCachedRewards returns validator rewards via the shared fetcher's 30s
// per-address cache.
func GetCachedRewards(ctx context.Context, cfg config.Config, validatorAddr string) (commission string, outstanding string, err error) {
	return globalFetcher.GetCachedValidatorRewards(ctx, cfg, validatorAddr)
}
+
+// GetEVMAddress converts a Cosmos validator address to EVM address
+func GetEVMAddress(ctx context.Context, validatorAddr string) string {
+ if validatorAddr == "" {
+ return "โ"
+ }
+
+ bin, err := exec.LookPath("pchaind")
+ if err != nil {
+ return "โ"
+ }
+
+ cmd := exec.CommandContext(ctx, bin, "debug", "addr", validatorAddr)
+ output, err := cmd.Output()
+ if err != nil {
+ return "โ"
+ }
+
+ // Parse output to extract hex address
+ lines := strings.Split(string(output), "\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "Address (hex):") {
+ parts := strings.SplitN(line, ":", 2)
+ if len(parts) == 2 {
+ hex := strings.TrimSpace(parts[1])
+ return "0x" + hex
+ }
+ }
+ }
+
+ return "โ"
+}
+
+// GetSlashingInfo fetches slashing information for a validator (jail reason, jailed until time, etc)
+func GetSlashingInfo(ctx context.Context, cfg config.Config, consensusPubkey string) (SlashingInfo, error) {
+ bin, err := exec.LookPath("pchaind")
+ if err != nil {
+ return SlashingInfo{}, fmt.Errorf("pchaind not found: %w", err)
+ }
+
+ remote := fmt.Sprintf("tcp://%s:26657", cfg.GenesisDomain)
+
+ // Query signing info to get jail details
+ // consensusPubkey should be a JSON string like: {"@type":"/cosmos.crypto.ed25519.PubKey","key":"..."}
+ cmd := exec.CommandContext(ctx, bin, "query", "slashing", "signing-info", consensusPubkey, "--node", remote, "-o", "json")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return SlashingInfo{}, fmt.Errorf("failed to query slashing info: %w", err)
+ }
+
+ var result struct {
+ ValSigningInfo struct {
+ Address string `json:"address"`
+ StartHeight string `json:"start_height"`
+ JailedUntil string `json:"jailed_until"`
+ Tombstoned bool `json:"tombstoned"`
+ MissedBlocks string `json:"missed_blocks_counter"`
+ } `json:"val_signing_info"`
+ }
+
+ if err := json.Unmarshal(output, &result); err != nil {
+ return SlashingInfo{}, fmt.Errorf("failed to parse slashing info: %w", err)
+ }
+
+ info := SlashingInfo{
+ Tombstoned: result.ValSigningInfo.Tombstoned,
+ JailedUntil: result.ValSigningInfo.JailedUntil,
+ }
+
+ // Parse missed blocks counter
+ if result.ValSigningInfo.MissedBlocks != "" {
+ if mb, err := strconv.ParseInt(result.ValSigningInfo.MissedBlocks, 10, 64); err == nil {
+ info.MissedBlocks = mb
+ }
+ }
+
+ // Determine jail reason with better heuristics
+ if info.Tombstoned {
+ info.JailReason = "Double Sign"
+ } else if info.JailedUntil != "" && info.JailedUntil != "1970-01-01T00:00:00Z" {
+ // Valid jail time (not epoch) indicates downtime
+ info.JailReason = "Downtime"
+ } else if info.MissedBlocks > 0 {
+ // If missed blocks > 0, it's likely a downtime issue
+ info.JailReason = "Downtime"
+ } else {
+ // Unable to determine reason
+ info.JailReason = "Unknown"
+ }
+
+ return info, nil
+}
+
// getKeyringAddresses returns all addresses in the local keyring by invoking
// `pchaind keys list`. Any failure (command error, unparsable JSON) yields an
// empty slice — callers treat "no keys" and "couldn't list keys" the same.
// NOTE(review): runs without a context, so a hung keyring backend blocks the
// caller — consider threading ctx through if that becomes an issue.
func getKeyringAddresses(bin string, cfg config.Config) []string {
	var addresses []string

	// List all keys in the keyring
	cmd := exec.Command(bin, "keys", "list", "--keyring-backend", cfg.KeyringBackend, "--home", cfg.HomeDir, "-o", "json")
	output, err := cmd.Output()
	if err != nil {
		return addresses
	}

	// Parse the JSON output to extract addresses
	var keys []struct {
		Address string `json:"address"`
	}
	if err := json.Unmarshal(output, &keys); err != nil {
		return addresses
	}

	for _, key := range keys {
		if key.Address != "" {
			addresses = append(addresses, key.Address)
		}
	}

	return addresses
}
diff --git a/push-validator-manager/internal/validator/service.go b/push-validator-manager/internal/validator/service.go
new file mode 100644
index 00000000..53e73aa7
--- /dev/null
+++ b/push-validator-manager/internal/validator/service.go
@@ -0,0 +1,53 @@
+package validator
+
+import "context"
+
// KeyInfo contains structured information about a created/existing key.
type KeyInfo struct {
	Address  string // Cosmos address (push1...)
	Name     string // Key name
	Pubkey   string // Public key JSON
	Type     string // Key type (local, ledger, etc)
	Mnemonic string // Recovery mnemonic phrase (only set on creation) — sensitive, never log
}
+
// Service handles key ops, balances, validator detection, and registration flow.
// Implementations may block until the underlying operation completes or ctx is
// cancelled; tx-submitting methods return the transaction hash on success.
type Service interface {
	EnsureKey(ctx context.Context, name string) (KeyInfo, error)     // returns key info
	GetEVMAddress(ctx context.Context, addr string) (string, error)  // returns hex/EVM address
	IsValidator(ctx context.Context, addr string) (bool, error)
	Balance(ctx context.Context, addr string) (string, error)        // denom string for now
	Register(ctx context.Context, args RegisterArgs) (string, error) // returns tx hash
	Unjail(ctx context.Context, keyName string) (string, error)      // returns tx hash
	WithdrawRewards(ctx context.Context, validatorAddr string, keyName string, includeCommission bool) (string, error) // returns tx hash
	Delegate(ctx context.Context, args DelegateArgs) (string, error) // returns tx hash
}
+
// RegisterArgs are the parameters for Service.Register (create-validator).
type RegisterArgs struct {
	Moniker           string // validator display name
	CommissionRate    string // decimal string, e.g. "0.10"; implementation may default when empty
	MinSelfDelegation string // integer string; implementation may default when empty
	Amount            string // self-delegation amount in base denom (no denom suffix)
	KeyName           string // keyring key used to sign the transaction
}
+
// DelegateArgs are the parameters for Service.Delegate.
type DelegateArgs struct {
	ValidatorAddress string // validator operator address to delegate to
	Amount           string // amount in base denom (no denom suffix)
	KeyName          string // keyring key used to sign the transaction
}
+
// New returns a stub validator service whose methods all succeed with zero
// values; useful as a placeholder until a real implementation is wired up.
func New() Service { return &noop{} }

// noop is the do-nothing Service implementation.
type noop struct{}

func (n *noop) EnsureKey(ctx context.Context, name string) (KeyInfo, error) { return KeyInfo{}, nil }
func (n *noop) GetEVMAddress(ctx context.Context, addr string) (string, error) { return "", nil }
func (n *noop) IsValidator(ctx context.Context, addr string) (bool, error) { return false, nil }
func (n *noop) Balance(ctx context.Context, addr string) (string, error) { return "0", nil }
func (n *noop) Register(ctx context.Context, args RegisterArgs) (string, error) { return "", nil }
func (n *noop) Unjail(ctx context.Context, keyName string) (string, error) { return "", nil }
func (n *noop) WithdrawRewards(ctx context.Context, validatorAddr string, keyName string, includeCommission bool) (string, error) { return "", nil }
func (n *noop) Delegate(ctx context.Context, args DelegateArgs) (string, error) { return "", nil }
+
diff --git a/push-validator-manager/internal/validator/service_impl.go b/push-validator-manager/internal/validator/service_impl.go
new file mode 100644
index 00000000..a4b12ae7
--- /dev/null
+++ b/push-validator-manager/internal/validator/service_impl.go
@@ -0,0 +1,420 @@
+package validator
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+)
+
// Options configures the CLI-backed validator Service implementation.
type Options struct {
	BinPath       string // path to the chain binary; defaults to "pchaind" when empty
	HomeDir       string // node home directory, passed via --home
	ChainID       string // chain ID used when submitting transactions
	Keyring       string // keyring backend, passed via --keyring-backend
	GenesisDomain string // e.g., rpc-testnet-donut-node1.push.org
	Denom         string // e.g., upc
}
+
// NewWith returns a Service implementation that shells out to the pchaind
// binary, configured with the given options.
func NewWith(opts Options) Service { return &svc{opts: opts} }

// svc is the CLI-backed Service; every method execs the chain binary.
type svc struct{ opts Options }
+
// EnsureKey returns info for the named key, creating it (eth_secp256k1) if it
// does not already exist. The Mnemonic field is populated only when the key
// was created by this call; for pre-existing keys it is empty.
// NOTE(review): mutates s.opts.BinPath to apply the default; not safe for
// concurrent use of a shared *svc — confirm callers are single-threaded.
func (s *svc) EnsureKey(ctx context.Context, name string) (KeyInfo, error) {
	if name == "" { return KeyInfo{}, errors.New("key name required") }
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }

	// Check if key already exists ("keys show -a" prints just the address).
	show := exec.CommandContext(ctx, s.opts.BinPath, "keys", "show", name, "-a", "--keyring-backend", s.opts.Keyring, "--home", s.opts.HomeDir)
	out, err := show.Output()
	if err == nil {
		// Key exists - fetch details (no mnemonic available for existing keys).
		return s.getKeyInfo(ctx, name, strings.TrimSpace(string(out)), "")
	}

	// Key doesn't exist - create it and capture output
	add := exec.CommandContext(ctx, s.opts.BinPath, "keys", "add", name, "--keyring-backend", s.opts.Keyring, "--algo", "eth_secp256k1", "--home", s.opts.HomeDir)

	// CombinedOutput captures both stdout and stderr; the mnemonic block may be
	// written to either stream depending on the CLI version.
	output, err := add.CombinedOutput()
	if err != nil {
		return KeyInfo{}, fmt.Errorf("keys add: %w", err)
	}

	// Parse the output to extract the recovery mnemonic (best-effort).
	mnemonic := extractMnemonic(string(output))

	// Get the address of the newly created key.
	out2, err := exec.CommandContext(ctx, s.opts.BinPath, "keys", "show", name, "-a", "--keyring-backend", s.opts.Keyring, "--home", s.opts.HomeDir).Output()
	if err != nil { return KeyInfo{}, fmt.Errorf("keys show: %w", err) }

	addr := strings.TrimSpace(string(out2))
	return s.getKeyInfo(ctx, name, addr, mnemonic)
}
+
// getKeyInfo fetches full key details (pubkey, type) for a key by name.
// On any failure it degrades gracefully to the minimal info already known
// (address/name/mnemonic) instead of returning an error.
func (s *svc) getKeyInfo(ctx context.Context, name, addr, mnemonic string) (KeyInfo, error) {
	// Get key details in JSON format.
	cmd := exec.CommandContext(ctx, s.opts.BinPath, "keys", "show", name, "--keyring-backend", s.opts.Keyring, "--home", s.opts.HomeDir, "-o", "json")
	out, err := cmd.Output()
	if err != nil {
		return KeyInfo{Address: addr, Name: name, Mnemonic: mnemonic}, nil
	}

	// Parse JSON to extract pubkey and type.
	var keyData struct {
		Name    string `json:"name"`
		Type    string `json:"type"`
		Address string `json:"address"`
		Pubkey  struct {
			Type string `json:"@type"`
			Key  string `json:"key"`
		} `json:"pubkey"`
	}

	if err := json.Unmarshal(out, &keyData); err != nil {
		return KeyInfo{Address: addr, Name: name, Mnemonic: mnemonic}, nil
	}

	// Re-encode the pubkey in the compact JSON form stored in KeyInfo.Pubkey.
	pubkeyJSON := fmt.Sprintf(`{"@type":"%s","key":"%s"}`, keyData.Pubkey.Type, keyData.Pubkey.Key)

	return KeyInfo{
		Address:  addr,
		Name:     keyData.Name,
		Pubkey:   pubkeyJSON,
		Type:     keyData.Type,
		Mnemonic: mnemonic,
	}, nil
}
+
// extractMnemonic extracts the mnemonic phrase from `keys add` output.
// Heuristic parser: the CLI prints a warning containing "write this mnemonic
// phrase", followed (possibly after blank or explanatory lines) by the
// mnemonic on a single line. Returns "" when no plausible line is found.
// NOTE(review): the len > 20 check is a loose plausibility filter, not a
// BIP-39 validation — confirm against the actual pchaind output format.
func extractMnemonic(output string) string {
	lines := strings.Split(output, "\n")
	foundWarning := false

	// The mnemonic appears after the warning message, skip the warning line itself,
	// then skip empty lines, and the next non-empty line is the mnemonic
	for i, line := range lines {
		line = strings.TrimSpace(line)

		// Look for the warning message
		if strings.Contains(line, "write this mnemonic phrase") {
			foundWarning = true
			continue
		}

		// After finding the warning, skip empty lines and capture the next non-empty line
		if foundWarning {
			if line == "" {
				continue
			}
			// This is the mnemonic line (first non-empty line after the warning)
			// Make sure it's not another message line by checking if it starts with common message prefixes
			if !strings.HasPrefix(line, "**") && !strings.HasPrefix(line, "It is") && len(line) > 20 {
				return line
			}
			// If we hit "It is the only way..." or similar, look for the next line
			if i+1 < len(lines) {
				nextLine := strings.TrimSpace(lines[i+1])
				if nextLine != "" && len(nextLine) > 20 {
					return nextLine
				}
			}
		}
	}

	return ""
}
+
// GetEVMAddress converts a bech32 account address to its 0x-prefixed EVM hex
// form by parsing the "Address (hex):" line of `pchaind debug addr`.
func (s *svc) GetEVMAddress(ctx context.Context, addr string) (string, error) {
	if addr == "" { return "", errors.New("address required") }
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	cmd := exec.CommandContext(ctx, s.opts.BinPath, "debug", "addr", addr)
	out, err := cmd.Output()
	if err != nil { return "", fmt.Errorf("debug addr: %w", err) }
	// Parse output to extract hex address.
	lines := strings.Split(string(out), "\n")
	for _, line := range lines {
		if strings.HasPrefix(line, "Address (hex):") {
			parts := strings.SplitN(line, ":", 2)
			if len(parts) == 2 {
				hex := strings.TrimSpace(parts[1])
				return "0x" + hex, nil
			}
		}
	}
	return "", errors.New("could not extract EVM address from debug output")
}
+
// IsValidator reports whether this node's consensus public key appears in the
// validator set on the remote genesis node.
// NOTE(review): the addr argument is currently unused; the check is keyed
// purely on the local consensus pubkey — confirm that is the intent.
func (s *svc) IsValidator(ctx context.Context, addr string) (bool, error) {
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	// Compare local consensus pubkey with remote validators
	showVal := exec.CommandContext(ctx, s.opts.BinPath, "tendermint", "show-validator", "--home", s.opts.HomeDir)
	b, err := showVal.Output()
	if err != nil { return false, fmt.Errorf("show-validator: %w", err) }
	var pub struct{ Key string `json:"key"` }
	if err := json.Unmarshal(b, &pub); err != nil { return false, err }
	if pub.Key == "" { return false, errors.New("empty consensus pubkey") }
	// Query validators from remote genesis RPC.
	remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)
	q := exec.CommandContext(ctx, s.opts.BinPath, "query", "staking", "validators", "--node", remote, "-o", "json")
	vb, err := q.Output()
	if err != nil { return false, fmt.Errorf("query validators: %w", err) }
	// Remote uses "value" field, not "key"
	var payload struct{ Validators []struct{ ConsensusPubkey struct{ Value string `json:"value"` } `json:"consensus_pubkey"` } `json:"validators"` }
	if err := json.Unmarshal(vb, &payload); err != nil { return false, err }
	for _, v := range payload.Validators {
		if strings.EqualFold(v.ConsensusPubkey.Value, pub.Key) { return true, nil }
	}
	return false, nil
}
+
// Balance returns the amount of the configured denom held by addr, as a raw
// integer string. An address with no balance in that denom yields "0".
func (s *svc) Balance(ctx context.Context, addr string) (string, error) {
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	// Always query remote genesis node for canonical state during validator registration
	remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)
	q := exec.CommandContext(ctx, s.opts.BinPath, "query", "bank", "balances", addr, "--node", remote, "-o", "json")
	out, err := q.Output()
	if err != nil { return "0", fmt.Errorf("query balance: %w", err) }
	var payload struct{ Balances []struct{ Denom, Amount string } `json:"balances"` }
	if err := json.Unmarshal(out, &payload); err != nil { return "0", err }
	// Pick out the configured denom; other coins are ignored.
	for _, c := range payload.Balances { if c.Denom == s.opts.Denom { return c.Amount, nil } }
	return "0", nil
}
+
// Register submits a staking create-validator transaction and returns the tx
// hash. The validator description is written to a temp JSON file consumed by
// the CLI; commission and min-self-delegation fall back to defaults when the
// caller leaves them blank.
func (s *svc) Register(ctx context.Context, args RegisterArgs) (string, error) {
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	// Prepare validator JSON - use a separate timeout for this command so an
	// already-expired caller ctx cannot abort the quick local query.
	showCtx, showCancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer showCancel()
	pubJSON, err := exec.CommandContext(showCtx, s.opts.BinPath, "tendermint", "show-validator", "--home", s.opts.HomeDir).Output()
	if err != nil { return "", fmt.Errorf("show-validator: %w", err) }
	tmp, err := os.CreateTemp("", "validator-*.json")
	if err != nil { return "", err }
	defer os.Remove(tmp.Name())
	// create-validator payload; amount carries the chain denom suffix.
	val := map[string]any{
		"pubkey": json.RawMessage(strings.TrimSpace(string(pubJSON))),
		"amount": fmt.Sprintf("%s%s", args.Amount, s.opts.Denom),
		"moniker": args.Moniker,
		"identity": "",
		"website": "",
		"security": "",
		"details": "Push Chain Validator",
		"commission-rate": valueOr(args.CommissionRate, "0.10"),
		"commission-max-rate": "0.20",
		"commission-max-change-rate": "0.01",
		"min-self-delegation": valueOr(args.MinSelfDelegation, "1"),
	}
	enc := json.NewEncoder(tmp)
	enc.SetEscapeHTML(false) // keep strings verbatim in the JSON file
	if err := enc.Encode(val); err != nil { return "", err }
	_ = tmp.Close()

	// Submit TX against the remote genesis RPC, bounded to 60s.
	remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)
	ctxTimeout, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctxTimeout, s.opts.BinPath, "tx", "staking", "create-validator", tmp.Name(),
		"--from", args.KeyName,
		"--chain-id", s.opts.ChainID,
		"--keyring-backend", s.opts.Keyring,
		"--home", s.opts.HomeDir,
		"--node", remote,
		"--gas=auto", "--gas-adjustment=1.3", fmt.Sprintf("--gas-prices=1000000000%s", s.opts.Denom),
		"--yes",
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Try to extract a clean reason from the CLI output before falling
		// back to the raw exec error.
		msg := extractErrorLine(string(out))
		if msg == "" { msg = err.Error() }
		return "", errors.New(msg)
	}
	// Find txhash: in the CLI output.
	lines := strings.Split(string(out), "\n")
	for _, ln := range lines {
		if strings.Contains(ln, "txhash:") {
			parts := strings.SplitN(ln, "txhash:", 2)
			if len(parts) == 2 { return strings.TrimSpace(parts[1]), nil }
		}
	}
	return "", errors.New("transaction submitted; txhash not found in output")
}
+
// extractErrorLine scans CLI output for the first line that looks like a
// meaningful failure reason (rpc / message-execution / funds / auth errors).
// Returns "" when nothing matches so callers can fall back to the exec error.
func extractErrorLine(s string) string {
	for _, l := range strings.Split(s, "\n") {
		if strings.Contains(l, "rpc error:") || strings.Contains(l, "failed to execute message") || strings.Contains(l, "insufficient") || strings.Contains(l, "unauthorized") {
			return l
		}
	}
	return ""
}
+
+func valueOr(v, d string) string { if strings.TrimSpace(v) == "" { return d }; return v }
+
// Unjail submits a slashing unjail transaction (signed by keyName) to restore
// a jailed validator, and returns the tx hash.
func (s *svc) Unjail(ctx context.Context, keyName string) (string, error) {
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	if keyName == "" { return "", errors.New("key name required") }

	// Submit unjail transaction to the remote genesis RPC, bounded to 60s.
	remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)
	ctxTimeout, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctxTimeout, s.opts.BinPath, "tx", "slashing", "unjail",
		"--from", keyName,
		"--chain-id", s.opts.ChainID,
		"--keyring-backend", s.opts.Keyring,
		"--home", s.opts.HomeDir,
		"--node", remote,
		"--gas=auto", "--gas-adjustment=1.3", fmt.Sprintf("--gas-prices=1000000000%s", s.opts.Denom),
		"--yes",
	)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Try to extract a clean reason from the CLI output first.
		msg := extractErrorLine(string(out))
		if msg == "" { msg = err.Error() }
		return "", errors.New(msg)
	}

	// Find txhash in the CLI output.
	lines := strings.Split(string(out), "\n")
	for _, ln := range lines {
		if strings.Contains(ln, "txhash:") {
			parts := strings.SplitN(ln, "txhash:", 2)
			if len(parts) == 2 { return strings.TrimSpace(parts[1]), nil }
		}
	}
	return "", errors.New("transaction submitted; txhash not found in output")
}
+
// WithdrawRewards submits a transaction to withdraw delegation rewards for
// validatorAddr (signed by keyName) and optionally the validator commission,
// returning the tx hash. Failure messages are normalized for end users via
// improveRewardErrorMessage.
func (s *svc) WithdrawRewards(ctx context.Context, validatorAddr string, keyName string, includeCommission bool) (string, error) {
	if s.opts.BinPath == "" { s.opts.BinPath = "pchaind" }
	if validatorAddr == "" { return "", errors.New("validator address required") }
	if keyName == "" { return "", errors.New("key name required") }

	remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)

	// Build the withdraw rewards command using validator address directly
	args := []string{
		"tx", "distribution", "withdraw-rewards", validatorAddr,
		"--from", keyName,
		"--chain-id", s.opts.ChainID,
		"--keyring-backend", s.opts.Keyring,
		"--home", s.opts.HomeDir,
		"--node", remote,
		"--gas=auto", "--gas-adjustment=1.3", fmt.Sprintf("--gas-prices=1000000000%s", s.opts.Denom),
		"--yes",
	}

	// Add commission flag if requested (validator operator only).
	if includeCommission {
		args = append(args, "--commission")
	}

	// Submit transaction, bounded to 60s.
	ctxTimeout, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctxTimeout, s.opts.BinPath, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		// Extract and enhance error message
		msg := extractErrorLine(string(out))
		if msg == "" { msg = err.Error() }

		// Improve error messages for common cases
		msg = improveRewardErrorMessage(msg)
		return "", errors.New(msg)
	}

	// Find txhash
	lines := strings.Split(string(out), "\n")
	for _, ln := range lines {
		if strings.Contains(ln, "txhash:") {
			parts := strings.SplitN(ln, "txhash:", 2)
			if len(parts) == 2 { return strings.TrimSpace(parts[1]), nil }
		}
	}
	return "", errors.New("transaction submitted; txhash not found in output")
}
+
// improveRewardErrorMessage maps common withdrawal failures to user-friendly
// messages. Pattern matching is case-insensitive, but when no known pattern
// matches the original message is returned verbatim so its casing is
// preserved (previously the message was returned lowercased, mangling the
// error shown to the user).
func improveRewardErrorMessage(msg string) string {
	lower := strings.ToLower(msg)

	if strings.Contains(lower, "no delegation distribution info") {
		return "No rewards to withdraw. This is normal for new validators that haven't earned any rewards yet."
	}
	if strings.Contains(lower, "insufficient") && strings.Contains(lower, "fee") {
		return "Insufficient balance to pay transaction fees. Check your account balance."
	}
	if strings.Contains(lower, "invalid coins") || strings.Contains(lower, "empty") {
		return "No rewards available to withdraw."
	}
	if strings.Contains(lower, "unauthorized") {
		return "Transaction signing failed. Check that the key exists and is accessible."
	}

	// Unknown failure: pass the original message through unchanged.
	return msg
}
+
+// Delegate performs delegation (staking more tokens) to a validator
+func (s *svc) Delegate(ctx context.Context, args DelegateArgs) (string, error) {
+ if s.opts.BinPath == "" {
+ s.opts.BinPath = "pchaind"
+ }
+ if args.ValidatorAddress == "" {
+ return "", errors.New("validator address required")
+ }
+ if args.Amount == "" {
+ return "", errors.New("amount required")
+ }
+
+ // Submit delegation transaction
+ remote := fmt.Sprintf("tcp://%s:26657", s.opts.GenesisDomain)
+ ctxTimeout, cancel := context.WithTimeout(ctx, 60*time.Second)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctxTimeout, s.opts.BinPath, "tx", "staking", "delegate",
+ args.ValidatorAddress,
+ fmt.Sprintf("%s%s", args.Amount, s.opts.Denom),
+ "--from", args.KeyName,
+ "--chain-id", s.opts.ChainID,
+ "--keyring-backend", s.opts.Keyring,
+ "--home", s.opts.HomeDir,
+ "--node", remote,
+ "--gas=auto", "--gas-adjustment=1.3", fmt.Sprintf("--gas-prices=1000000000%s", s.opts.Denom),
+ "--yes",
+ )
+
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ // Try to extract a clean error message
+ msg := extractErrorLine(string(out))
+ if msg == "" {
+ msg = err.Error()
+ }
+ return "", errors.New(msg)
+ }
+
+ // Extract tx hash from output
+ lines := strings.Split(string(out), "\n")
+ for _, line := range lines {
+ if strings.Contains(line, "txhash:") {
+ parts := strings.SplitN(line, "txhash:", 2)
+ if len(parts) > 1 {
+ return strings.TrimSpace(parts[1]), nil
+ }
+ }
+ }
+
+ return "", errors.New("delegation successful but transaction hash not found in output")
+}
diff --git a/push-validator-manager/internal/validator/service_impl_test.go b/push-validator-manager/internal/validator/service_impl_test.go
new file mode 100644
index 00000000..27a66e90
--- /dev/null
+++ b/push-validator-manager/internal/validator/service_impl_test.go
@@ -0,0 +1,67 @@
+package validator
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
// makeFakePchaind creates a fake pchaind executable (a POSIX shell script)
// that responds to the minimal subset of commands used by the validator
// service: tendermint show-validator, keys show/add, query bank/staking,
// and tx staking.
func makeFakePchaind(t *testing.T) string {
	// Skip before doing any work: the fake binary is a shell script and
	// cannot be executed on Windows (previously the skip ran after the
	// script file had already been written).
	if runtime.GOOS == "windows" {
		t.Skip("windows not supported in this test")
	}
	dir := t.TempDir()
	bin := filepath.Join(dir, "pchaind")
	script := "#!/usr/bin/env sh\n" +
		"cmd=\"$1\"; shift\n" +
		"if [ \"$cmd\" = \"tendermint\" ]; then sub=\"$1\"; shift; if [ \"$sub\" = \"show-validator\" ]; then echo '{\"type\":\"tendermint/PubKeyEd25519\",\"key\":\"PUBKEYBASE64\"}'; exit 0; fi; fi\n" +
		"if [ \"$cmd\" = \"keys\" ]; then sub=\"$1\"; shift\n" +
		" if [ \"$sub\" = \"show\" ]; then\n" +
		" if [ \"$1\" = \"-o\" ] && [ \"$2\" = \"json\" ]; then\n" +
		" echo '{\"name\":\"test-key\",\"type\":\"local\",\"address\":\"push1addrxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\",\"pubkey\":{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AAAA\"}}'\n" +
		" else\n" +
		" echo 'push1addrxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\n" +
		" fi\n" +
		" exit 0\n" +
		" fi\n" +
		" if [ \"$sub\" = \"add\" ]; then exit 0; fi\n" +
		"fi\n" +
		"if [ \"$cmd\" = \"query\" ]; then mod=\"$1\"; shift; if [ \"$mod\" = \"bank\" ]; then echo '{\"balances\":[{\"denom\":\"upc\",\"amount\":\"999\"}]}' ; exit 0; fi; if [ \"$mod\" = \"staking\" ]; then echo '{\"validators\":[]}' ; exit 0; fi; fi\n" +
		"if [ \"$cmd\" = \"tx\" ]; then mod=\"$1\"; shift; if [ \"$mod\" = \"staking\" ]; then echo 'txhash: 0xABCD'; exit 0; fi; fi\n" +
		"echo 'unknown'; exit 1\n"
	if err := os.WriteFile(bin, []byte(script), 0o755); err != nil {
		t.Fatal(err)
	}
	return bin
}
+
// TestValidator_RegisterHappyPath drives the full registration flow against
// the fake pchaind: key lookup, validator-set membership check, balance
// query, and create-validator tx submission.
func TestValidator_RegisterHappyPath(t *testing.T) {
	bin := makeFakePchaind(t)
	home := t.TempDir()
	s := NewWith(Options{
		BinPath:       bin,
		HomeDir:       home,
		ChainID:       "push_42101-1",
		Keyring:       "test",
		GenesisDomain: "rpc-testnet-donut-node1.push.org",
		Denom:         "upc",
	})
	ctx := context.Background()
	// EnsureKey should return the fake key info
	keyInfo, err := s.EnsureKey(ctx, "validator-key")
	if err != nil { t.Fatal(err) }
	if keyInfo.Address == "" { t.Fatal("empty address") }
	// IsValidator should be false (no validators in fake output)
	ok, err := s.IsValidator(ctx, keyInfo.Address)
	if err != nil { t.Fatal(err) }
	if ok { t.Fatal("expected not a validator") }
	// Balance should parse
	bal, err := s.Balance(ctx, keyInfo.Address)
	if err != nil { t.Fatal(err) }
	if bal != "999" { t.Fatalf("balance got %s", bal) }
	// Register should return txhash
	tx, err := s.Register(ctx, RegisterArgs{Moniker: "m", Amount: "1500000000000000000", KeyName: "validator-key"})
	if err != nil { t.Fatal(err) }
	if tx == "" { t.Fatal("empty txhash") }
}
+
diff --git a/push-validator-manager/internal/validator/types.go b/push-validator-manager/internal/validator/types.go
new file mode 100644
index 00000000..ac46a2a4
--- /dev/null
+++ b/push-validator-manager/internal/validator/types.go
@@ -0,0 +1,41 @@
+package validator
+
// ValidatorInfo contains information about a single validator as reported by
// the staking module.
type ValidatorInfo struct {
	OperatorAddress string
	Moniker         string
	Status          string // BONDED, UNBONDING, UNBONDED
	Tokens          string // Raw token amount
	VotingPower     int64  // Tokens converted to power
	Commission      string // Commission rate as percentage
	Jailed          bool
}
+
// ValidatorList contains a list of validators and the total count.
type ValidatorList struct {
	Validators []ValidatorInfo
	Total      int
}
+
// SlashingInfo contains slashing-related information for a validator, used to
// explain why a validator is jailed.
type SlashingInfo struct {
	Tombstoned  bool
	JailedUntil string // RFC3339 formatted timestamp
	MissedBlocks int64
	JailReason  string // "Downtime", "Double Sign", or "Unknown"
}
+
// MyValidatorInfo contains the status of the current node's validator; when
// IsValidator is false the remaining fields carry their zero values.
type MyValidatorInfo struct {
	IsValidator  bool
	Address      string
	Moniker      string
	Status       string
	VotingPower  int64
	VotingPct    float64 // Percentage of total voting power [0,1]
	Commission   string
	Jailed       bool
	SlashingInfo SlashingInfo // Jail reason and details
	ValidatorExistsWithSameMoniker bool   // True if a different validator uses this node's moniker
	ConflictingMoniker             string // The moniker that conflicts
}
diff --git a/push-validator-manager/push-validator b/push-validator-manager/push-validator
new file mode 100755
index 00000000..892da378
Binary files /dev/null and b/push-validator-manager/push-validator differ
diff --git a/push-validator-manager/rebuild.sh b/push-validator-manager/rebuild.sh
new file mode 100755
index 00000000..216ca047
--- /dev/null
+++ b/push-validator-manager/rebuild.sh
@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Simple rebuild script for push-validator

set -e

echo "Building push-validator..."
# Ensure the output directory exists; `go build -o` does not create parent dirs.
mkdir -p build
CGO_ENABLED=0 go build -a -o build/push-validator ./cmd/push-validator

echo "โ Built: build/push-validator"

# Automatically create wrapper script in system location
# This works around a macOS PATH-execution issue where the binary is killed with SIGKILL
mkdir -p ~/.local/bin

# Get the absolute path to the build directory
BUILD_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/build"

# $BUILD_DIR expands now (baked into the wrapper); \$@ is forwarded at runtime.
cat > ~/.local/bin/push-validator << EOF
#!/bin/bash
# Wrapper script for push-validator
# Works around macOS PATH-execution issue (SIGKILL when running binary directly from PATH)
exec "$BUILD_DIR/push-validator" "\$@"
EOF

chmod +x ~/.local/bin/push-validator
echo "โ Created wrapper script at ~/.local/bin/push-validator"
echo ""
echo "You can now run: push-validator dashboard"
diff --git a/push-validator-manager/scripts/build-pchaind.sh b/push-validator-manager/scripts/build-pchaind.sh
new file mode 100755
index 00000000..7fa46715
--- /dev/null
+++ b/push-validator-manager/scripts/build-pchaind.sh
@@ -0,0 +1,120 @@
+#!/bin/bash
+# Build pchaind binary with correct version for Push Chain compatibility
+
+set -e
+
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+RED='\033[0;31m'
+YELLOW='\033[0;33m'
+NC='\033[0m'
+
+print_status() { echo -e " ${BLUE}$1${NC}"; }
+print_success() { echo -e " ${GREEN}$1${NC}"; }
+print_error() { echo -e "${RED}$1${NC}"; }
+print_warning() { echo -e "${YELLOW}$1${NC}"; }
+
+# Get script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
# Parse arguments
SOURCE_DIR="${1:-}"
OUTPUT_DIR="${2:-$SCRIPT_DIR/build}"

if [ -z "$SOURCE_DIR" ]; then
    # Try to find the source directory.
    # First check if we're in a local dev environment (repo root two levels up).
    # NOTE: go.mod is a file, so test with -f (the previous -d never matched).
    if [ -f "$SCRIPT_DIR/../../go.mod" ]; then
        SOURCE_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
    elif [ -d "$HOME/.local/share/push-validator/repo" ]; then
        SOURCE_DIR="$HOME/.local/share/push-validator/repo"
    else
        print_error "โ Usage: $0 <source-dir> [output-dir]"
        print_error "  source-dir: Path to push-chain source code"
        print_error "  output-dir: Where to place built binary (default: ./build)"
        exit 1
    fi
fi

if [ ! -f "$SOURCE_DIR/go.mod" ]; then
    print_error "โ Invalid source directory: $SOURCE_DIR"
    print_error "  Expected to find go.mod in the directory"
    exit 1
fi
+
# Validate Go toolchain (requires 1.23+ for pchaind build).
# Check that go exists first: under `set -e` a missing binary would otherwise
# kill the script with an unhelpful "command not found".
if ! command -v go >/dev/null 2>&1; then
    print_error "โ Go is not installed or not on PATH"
    echo "Install Go 1.23+ from: https://go.dev/dl/"
    exit 1
fi

GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//')
GO_MAJOR=$(echo "$GO_VERSION" | cut -d. -f1)
GO_MINOR=$(echo "$GO_VERSION" | cut -d. -f2)

if [[ "$GO_MAJOR" -lt 1 ]] || [[ "$GO_MAJOR" -eq 1 && "$GO_MINOR" -lt 23 ]]; then
    print_error "โ Go 1.23 or higher is required (found: $GO_VERSION)"
    echo
    echo "The Push Node Daemon (pchaind) requires Go 1.23+ to build."
    echo
    echo "Please upgrade Go:"
    if [[ "$OSTYPE" == "darwin"* ]]; then
        echo "  โข Using Homebrew: brew upgrade go"
        echo "  โข Or download from: https://go.dev/dl/"
    else
        echo "  โข Download from: https://go.dev/dl/"
        echo "  โข Or use your package manager to upgrade"
    fi
    exit 1
fi
print_success "โ Go version check passed: $GO_VERSION"
+
+# Create output directory
+mkdir -p "$OUTPUT_DIR"
+
+# Change to source directory
+cd "$SOURCE_DIR"
+
+# Detect OS for sed command
+OS="linux"
+if [[ "$OSTYPE" == "darwin"* ]]; then
+ OS="macos"
+fi
+
+# Patch chain ID in app/app.go if needed
+APP_FILE="app/app.go"
+OLD_CHAIN_ID="localchain_9000-1"
+NEW_CHAIN_ID="push_42101-1"
+
+if [ -f "$APP_FILE" ]; then
+ if grep -q "$OLD_CHAIN_ID" "$APP_FILE"; then
+ print_status "๐ Patching chain ID from $OLD_CHAIN_ID to $NEW_CHAIN_ID"
+ if [[ "$OS" == "macos" ]]; then
+ sed -i '' "s/\"$OLD_CHAIN_ID\"/\"$NEW_CHAIN_ID\"/" "$APP_FILE"
+ else
+ sed -i "s/\"$OLD_CHAIN_ID\"/\"$NEW_CHAIN_ID\"/" "$APP_FILE"
+ fi
+ fi
+fi
+
# Detect version from git tags (can be overridden via VERSION env var)
VERSION=${VERSION:-$(git describe --tags --always --dirty 2>/dev/null || echo "v1.0.1")}
COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ')   # informational; not currently stamped

# Build with the exact same flags as the bash version. Also stamp the commit
# (COMMIT was previously computed but never used) so `pchaind version --long`
# can report it.
go build -mod=readonly -tags "netgo,ledger" \
    -ldflags "-X github.com/cosmos/cosmos-sdk/version.Name=pchain \
    -X github.com/cosmos/cosmos-sdk/version.AppName=pchaind \
    -X github.com/cosmos/cosmos-sdk/version.Version=$VERSION-native \
    -X github.com/cosmos/cosmos-sdk/version.Commit=$COMMIT \
    -s -w" \
    -trimpath -o "$OUTPUT_DIR/pchaind" ./cmd/pchaind
+
+# Verify binary was created
+if [ ! -f "$OUTPUT_DIR/pchaind" ]; then
+ print_error "โ Binary creation failed"
+ exit 1
+fi
+
+# Make executable
+chmod +x "$OUTPUT_DIR/pchaind"
+
+# Test basic functionality
+if ! "$OUTPUT_DIR/pchaind" version >/dev/null 2>&1; then
+ print_warning "โ ๏ธ Binary created but may have issues"
+fi
\ No newline at end of file
diff --git a/push-validator-manager/scripts/setup-log-rotation.sh b/push-validator-manager/scripts/setup-log-rotation.sh
new file mode 100755
index 00000000..04bd56d5
--- /dev/null
+++ b/push-validator-manager/scripts/setup-log-rotation.sh
@@ -0,0 +1,214 @@
+#!/bin/bash
+
+###############################################
+# Push Chain Log Rotation Setup Script
+# Native Push Validator Manager Edition
+#
+# - Rotates logs under ~/.pchain/logs/
+# - Uses logrotate (daily, compress, 14-day retention)
+# - Target: ~/.pchain/logs/*.log
+# - Adapted for native setup paths
+###############################################
+
+set -euo pipefail
+
+# Colors for output - Standardized palette
+GREEN='\033[0;32m' # Success messages
+RED='\033[0;31m' # Error messages
+YELLOW='\033[0;33m' # Warning messages
+CYAN='\033[0;36m' # Status/info messages
+BLUE='\033[1;94m' # Headers/titles (bright blue)
+NC='\033[0m' # No color/reset
+BOLD='\033[1m' # Emphasis
+
+# Print functions - Unified across all scripts
+print_status() { echo -e "${CYAN}$1${NC}"; }
+print_header() { echo -e "${BLUE}$1${NC}"; }
+print_success() { echo -e "${GREEN}$1${NC}"; }
+print_error() { echo -e "${RED}$1${NC}"; }
+print_warning() { echo -e "${YELLOW}$1${NC}"; }
+
+# Configuration
+LOG_DIR="$HOME/.pchain/logs"
+LOGROTATE_CONF="/etc/logrotate.d/push-chain-$USER"
+
+print_header "๐๏ธ Setting up log rotation for Push Chain"
+echo
+
+# Detect operating system
+OS=$(uname -s)
+case "$OS" in
+ Linux*)
+ MACHINE="Linux"
+ ;;
+ Darwin*)
+ MACHINE="macOS"
+ ;;
+ *)
+ MACHINE="Unknown"
+ ;;
+esac
+
+print_status "๐ฅ๏ธ Detected OS: $MACHINE"
+
# Handle macOS differently: no logrotate; rely on the OS plus manual cleanup.
if [ "$MACHINE" = "macOS" ]; then
    print_status "๐ macOS detected - using native log management"
    print_status "โน๏ธ macOS automatically manages log rotation via ASL/Unified Logging"
    print_status "๐ Your logs are in: $LOG_DIR"
    print_status "๐ View logs with: ./push-validator logs"
    echo
    print_success "✅ Log management configured for macOS"
    print_status "๐ก Manual cleanup command if needed:"
    print_status "   find $LOG_DIR -name '*.log' -mtime +14 -delete"
    exit 0
fi
+
+# Linux-specific setup
+print_status "๐ง Linux detected - setting up logrotate"
+
+# Check if we're running as root or have sudo
+if [ "$EUID" -eq 0 ]; then
+ SUDO=""
+ print_warning "โ ๏ธ Running as root - this is not recommended for normal operation"
+else
+ if ! command -v sudo >/dev/null 2>&1; then
+ print_error "โ This script requires sudo privileges to configure system log rotation"
+ exit 1
+ fi
+ SUDO="sudo"
+fi
+
+# Check if log directory exists
+if [ ! -d "$LOG_DIR" ]; then
+ print_status "๐ Creating log directory: $LOG_DIR"
+ mkdir -p "$LOG_DIR"
+fi
+
# Install logrotate if missing
print_status "๐ Checking for logrotate..."
if ! command -v logrotate >/dev/null 2>&1; then
    print_status "๐ฆ Installing logrotate..."

    # Detect package manager (Debian/Ubuntu, RHEL, Fedora, Arch)
    if command -v apt-get >/dev/null 2>&1; then
        $SUDO apt-get update && $SUDO apt-get install -y logrotate
    elif command -v yum >/dev/null 2>&1; then
        $SUDO yum install -y logrotate
    elif command -v dnf >/dev/null 2>&1; then
        $SUDO dnf install -y logrotate
    elif command -v pacman >/dev/null 2>&1; then
        $SUDO pacman -S --noconfirm logrotate
    else
        print_error "โ Could not detect package manager. Please install logrotate manually."
        exit 1
    fi

    print_success "✅ Logrotate installed"
else
    print_success "✅ Logrotate is available"
fi
+
+echo
+
# Create logrotate configuration
print_status "๐ ๏ธ Creating logrotate configuration at $LOGROTATE_CONF..."

# Get the current user for proper permissions
CURRENT_USER=$(whoami)
CURRENT_GROUP=$(id -gn)

# NOTE(review): this heredoc was reconstructed from a corrupted section of the
# script; settings mirror the summary printed below (daily, 14-day retention,
# compression). copytruncate lets the node keep writing to the same fd without
# needing a restart. The postrotate nginx reload preserves residue from the
# original template — confirm it is still wanted here.
$SUDO tee "$LOGROTATE_CONF" > /dev/null <<EOF
$LOG_DIR/*.log {
    daily
    rotate 14
    compress
    delaycompress
    missingok
    notifempty
    copytruncate
    create 0644 $CURRENT_USER $CURRENT_GROUP
    postrotate
        nginx -t >/dev/null 2>&1 && systemctl reload nginx >/dev/null 2>&1 || true
    endscript
}
EOF

print_success "✅ Log rotation configuration created"
echo

# Test the configuration
print_status "๐งช Testing logrotate configuration..."
if $SUDO logrotate --debug "$LOGROTATE_CONF" 2>/dev/null; then
    print_success "✅ Configuration test passed"
else
    print_warning "โ ๏ธ Configuration test had warnings (this may be normal)"
fi
+
+echo
+
# Show configuration details
print_header "๐ Log Rotation Summary"
echo
print_status "๐๏ธ Log directory: ${BOLD}$LOG_DIR${NC}"
print_status "โ๏ธ Configuration: ${BOLD}$LOGROTATE_CONF${NC}"
print_status "📅 Rotation schedule: ${BOLD}Daily${NC}"
print_status "๐๏ธ Retention period: ${BOLD}14 days${NC}"
print_status "๐ฆ Compression: ${BOLD}Enabled${NC}"
print_status "๐ค File owner: ${BOLD}$CURRENT_USER:$CURRENT_GROUP${NC}"
+
+echo
+print_success "๐ Log rotation setup complete!"
+echo
+
+print_status "๐ What happens now:"
+print_status " โข Logs will be rotated daily at system-defined time"
+print_status " โข Old logs will be compressed to save space"
+print_status " โข Logs older than 14 days will be automatically deleted"
+print_status " โข Process will be signaled to reopen log files"
+
+echo
+print_status "๐ Useful commands:"
+print_status " โข Check logs: ls -la $LOG_DIR"
+print_status " โข Manual rotation: sudo logrotate -f $LOGROTATE_CONF"
+print_status " โข View configuration: cat $LOGROTATE_CONF"
+print_status " โข System log status: sudo systemctl status logrotate"
+
+# Check if any log files currently exist
+if ls "$LOG_DIR"/*.log >/dev/null 2>&1; then
+ echo
+ print_status "๐ Current log files:"
+ ls -lh "$LOG_DIR"/*.log | while read -r line; do
+ print_status " $line"
+ done
+else
+ echo
+ print_status "๐ No log files found yet - they will be created when the node runs"
+fi
\ No newline at end of file
diff --git a/push-validator-manager/scripts/setup-nginx.sh b/push-validator-manager/scripts/setup-nginx.sh
new file mode 100755
index 00000000..d0e7d03d
--- /dev/null
+++ b/push-validator-manager/scripts/setup-nginx.sh
@@ -0,0 +1,314 @@
+#!/bin/bash
+
+# ---------------------------------------
+# Push Chain NGINX + SSL Setup Script
+# Native Push Validator Manager Edition
+# ---------------------------------------
+# - Sets up NGINX to serve Cosmos and EVM RPCs
+# - Bootstraps temporary HTTP config to fetch certs
+# - Replaces config with SSL-enabled version
+# - Adapted for native setup with ~/.pchain paths
+# ---------------------------------------
+
+set -euo pipefail
+
+# Colors for output - Standardized palette
+GREEN='\033[0;32m' # Success messages
+RED='\033[0;31m' # Error messages
+YELLOW='\033[0;33m' # Warning messages
+CYAN='\033[0;36m' # Status/info messages
+BLUE='\033[1;94m' # Headers/titles (bright blue)
+MAGENTA='\033[0;35m' # Accent/highlight data
+NC='\033[0m' # No color/reset
+BOLD='\033[1m' # Emphasis
+
+# Print functions - Unified across all scripts
+print_status() { echo -e "${CYAN}$1${NC}"; }
+print_header() { echo -e "${BLUE}$1${NC}"; }
+print_success() { echo -e "${GREEN}$1${NC}"; }
+print_error() { echo -e "${RED}$1${NC}"; }
+print_warning() { echo -e "${YELLOW}$1${NC}"; }
+
+# --- Argument validation -------------------------------------------------
+# The single required argument is the apex domain; the EVM endpoint is
+# always served from its "evm." subdomain.
+if [ -z "${1:-}" ]; then
+    print_error "โ Usage: ./push-validator setup-nginx yourdomain.com"
+    echo
+    print_status "This sets up public HTTPS endpoints:"
+    print_status " • https://yourdomain.com - Cosmos RPC"
+    print_status " • https://evm.yourdomain.com - EVM RPC"
+    echo
+    print_warning "⚠️ Requirements:"
+    print_status " • Domain must point to this server's IP"
+    print_status " • Ports 80 and 443 must be open"
+    print_status " • Node must be running"
+    exit 1
+fi
+
+DOMAIN=$1
+EVM_SUBDOMAIN="evm.$DOMAIN"
+NGINX_CONFIG="/etc/nginx/sites-available/push-node"
+# NOTE(review): fixed, predictable /tmp paths written via `$SUDO tee` are
+# susceptible to symlink attacks on multi-user hosts — consider mktemp.
+TMP_CONFIG="/tmp/push-node-temp"
+FINAL_CONFIG="/tmp/push-node-final"
+WEBROOT="/var/www/certbot"
+
+print_header "๐ Setting up NGINX for $DOMAIN and $EVM_SUBDOMAIN..."
+echo
+
+# Run privileged commands directly when already root; otherwise require
+# a working sudo.
+if [ "$EUID" -eq 0 ]; then
+    SUDO=""
+else
+    if ! command -v sudo >/dev/null 2>&1; then
+        print_error "โ This script requires sudo privileges"
+        exit 1
+    fi
+    SUDO="sudo"
+fi
+
+# --- Prerequisites -------------------------------------------------------
+print_status "๐ Checking prerequisites..."
+
+# NGINX only proxies a live node; bail out early if pchaind is not running.
+if ! pgrep -f "pchaind start" >/dev/null; then
+    print_error "โ Push node is not running"
+    print_status "Start your node first: ./push-validator start"
+    exit 1
+fi
+
+# Probe the local RPC ports the proxy will front (26657 Cosmos RPC,
+# 8545/8546 EVM HTTP/WS). Unreachable ports only warn: the node may
+# still be starting or syncing.
+for port in 26657 8545 8546; do
+    if ! nc -z localhost "$port" 2>/dev/null; then
+        print_warning "⚠️ Port $port not accessible locally"
+        print_status "Make sure your node is fully started and synced"
+    fi
+done
+
+print_success "✅ Prerequisites check passed"
+echo
+
+# --- Dependencies --------------------------------------------------------
+print_status "📦 Installing dependencies..."
+$SUDO apt update
+$SUDO apt install -y nginx certbot python3-certbot-nginx jq
+
+# Open HTTP/HTTPS for NGINX plus the P2P port; tolerate absent UFW.
+print_status "🛡 Configuring firewall..."
+$SUDO ufw allow 'Nginx Full' || print_warning "UFW not configured (this is okay)"
+$SUDO ufw allow 26656/tcp || print_warning "UFW not configured (this is okay)"
+
+# Webroot served for certbot's HTTP-01 (.well-known) challenge.
+print_status "๐ Setting up webroot..."
+$SUDO mkdir -p "$WEBROOT"
+$SUDO chown -R www-data:www-data "$WEBROOT"
+
+print_success "✅ Dependencies installed"
+echo
+
+# Write temporary HTTP-only config to serve .well-known
+print_status "โ๏ธ Creating temporary NGINX configuration..."
+# NOTE(review): this hunk is corrupted/truncated. The line below fuses the
+# start of a `tee <<EOF` here-doc (the temporary NGINX config) with the
+# tail of a later `if curl ...; then` health check; the here-doc body,
+# certbot certificate issuance, final SSL config install, and nginx reload
+# steps appear to be missing (hunk header claims +314 lines). As written
+# this is a bash syntax error (orphan `then`) — restore the missing span
+# from the upstream setup-nginx.sh before use.
+$SUDO tee "$TMP_CONFIG" > /dev/null < /dev/null </dev/null 2>&1; then
+ print_success "โ
 Cosmos RPC (HTTPS) is working: https://$DOMAIN"
+else
+ print_warning "โ ๏ธ Cosmos RPC check failed - may need a moment to start"
+fi
+
+# Test EVM RPC: issue an eth_blockNumber JSON-RPC call through the new
+# HTTPS endpoint; a `.result` field in the reply means the NGINX -> node
+# proxy chain works end to end. Errors are swallowed so the summary still
+# prints (|| echo "" yields an empty, non-matching response).
+EVM_TEST=$(curl -s -X POST "https://$EVM_SUBDOMAIN" \
+ -H "Content-Type: application/json" \
+ -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' 2>/dev/null || echo "")
+
+if echo "$EVM_TEST" | jq -e '.result' >/dev/null 2>&1; then
+ print_success "โ
 EVM RPC (HTTPS) is working: https://$EVM_SUBDOMAIN"
+else
+ print_warning "โ ๏ธ EVM RPC check failed - may need a moment to start"
+fi
+
+echo
+print_header "๐ Setup complete!"
+echo
+print_success "๐ Cosmos RPC: https://$DOMAIN"
+print_success "๐ EVM RPC: https://$EVM_SUBDOMAIN"
+echo
+print_status "๐ Next steps:"
+print_status " โข Test endpoints with your applications"
+print_status " โข Monitor logs: sudo journalctl -u nginx -f"
+print_status " โข Set up log rotation: ./push-validator setup-logs"
+print_status " โข Create backups: ./push-validator backup"
+echo
+print_warning "๐ Security notes:"
+print_status " โข Rate limiting is set to 10 requests/second per IP"
+print_status " โข SSL certificates auto-renew via cron"
+print_status " โข Monitor your server for unusual activity"
+
+# Clean up temporary files
+rm -f "$TMP_CONFIG" "$FINAL_CONFIG"
\ No newline at end of file