diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index c64af12..5b28cf5 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,8 +1,8 @@ -# foc-localnet - AI Coding Instructions +# foc-devnet - AI Coding Instructions ## Project Overview -**foc-localnet** is a Rust CLI tool for managing local Filecoin networks with FOC (Filecoin Onchain Contracts) support for warm storage services. It orchestrates Docker containers running Lotus nodes, miners, databases, and deploys smart contracts using Foundry (Forge/Cast). +**foc-devnet** is a Rust CLI tool for managing local Filecoin networks with FOC (Filecoin Onchain Contracts) support for warm storage services. It orchestrates Docker containers running Lotus nodes, miners, databases, and deploys smart contracts using Foundry (Forge/Cast). **Key Technologies**: Rust, Docker, Filecoin Lotus, FEVM (Filecoin EVM), Foundry, YugabyteDB, Solidity @@ -59,10 +59,10 @@ foc-curio # Second-gen miner (WIP) ## Directory Structure & Conventions ### User Data Directories -All persistent data lives under `~/.foc-localnet/` (see `src/paths.rs`): +All persistent data lives under `~/.foc-devnet/` (see `src/paths.rs`): ``` -~/.foc-localnet/ +~/.foc-devnet/ ├── artifacts/ │ ├── bin/ # Built Lotus/Curio binaries │ └── docker/volumes/ # Container persistent data @@ -82,11 +82,11 @@ All persistent data lives under `~/.foc-localnet/` (see `src/paths.rs`): ``` ### Key Path Functions (src/paths.rs) -- `foc_localnet_home()` - Root directory `~/.foc-localnet/` -- `foc_localnet_docker_volumes()` - Docker volumes directory -- `foc_localnet_lotus_keys()` - BLS key storage +- `foc_devnet_home()` - Root directory `~/.foc-devnet/` +- `foc_devnet_docker_volumes()` - Docker volumes directory +- `foc_devnet_lotus_keys()` - BLS key storage - `contract_addresses_file()` - JSON file with deployed contracts -- `foc_localnet_bin()` - Built binaries directory +- `foc_devnet_bin()` - Built binaries directory ## Smart Contract Deployment @@ -226,7 +226,7 @@ Deletes: BLS keys, genesis sectors, genesis config, blockchain data, contract ad **Files Deleted**: - `lotus-keys/key-1`, `lotus-keys/key-2`, `lotus-keys/prefunded-*` - `genesis-sectors/` -- `genesis/foc-localnet.json` +- `genesis/foc-devnet.json` - `lotus-data/`, `lotus-miner-data/` ### Reset (Chain Reset Only) @@ -275,7 +275,7 @@ docker run --rm --network host \ ### Port Conflicts **Problem**: Container fails to start due to port already in use -**Solution**: Check with `lsof -i :1234` or use `foc-localnet stop` to clean up +**Solution**: Check with `lsof -i :1234` or use `foc-devnet stop` to clean up ### Volume Permission Issues **Problem**: Container can't write to mounted volumes @@ -319,7 +319,7 @@ Use descriptive error messages with context: ```rust return Err(format!( "Lotus container is not running. FOC deployment requires Lotus to be running with FEVM enabled. \ - Run 'foc-localnet start' to start Lotus first." + Run 'foc-devnet start' to start Lotus first." ).into()); ``` @@ -359,7 +359,7 @@ const MOCK_USDFC_INITIAL_SUPPLY: &str = "1000000000000000000000000"; ## When Making Changes 1. **Adding New Steps**: Implement the `Step` trait, add to startup sequence in correct order -2. **Modifying Dockerfiles**: Update embedded assets, rebuild with `foc-localnet init --rebuild` +2. **Modifying Dockerfiles**: Update embedded assets, rebuild with `foc-devnet init --rebuild` 3. **Changing Paths**: Update `src/paths.rs` and ensure backward compatibility 4. 
**Contract Updates**: Update embedded asset, test deployment manually first 5. **Error Handling**: Always provide context (which container, which file, which command failed) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 49ad9f2..b3496da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,14 +1,16 @@ +--- name: CI on: push: - branches: [ main ] + branches: ['*'] pull_request: - branches: [ main ] + branches: [main] jobs: - build-and-test: + fmt-clippy: runs-on: ubuntu-latest + timeout-minutes: 10 steps: - uses: actions/checkout@v4 @@ -18,176 +20,319 @@ jobs: with: components: rustfmt, clippy - - name: Build - run: cargo build - - - name: Run tests - run: cargo test - - name: Check formatting run: cargo fmt --all -- --check - name: Run clippy - run: cargo clippy -- -D warnings + run: cargo clippy --all-targets --all-features -- -D warnings - requirements-setup-test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [macos-latest, ubuntu-latest] + integration-test: + runs-on: ["self-hosted", "linux", "x64", "16xlarge+gpu"] + timeout-minutes: 60 steps: - uses: actions/checkout@v4 - - name: Setup Rust toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + # Free up disk space on GitHub Actions runner to avoid "no space left" errors + - name: "EXEC: {Free up disk space}, independent" + uses: endersonmenezes/free-disk-space@v3 with: - components: rustfmt, clippy - - - name: Build - run: cargo build - - - name: Test requirements with --setup - run: | - cargo run -- requirements --setup - continue-on-error: false - - build-commands-test: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Setup Rust toolchain + remove_android: true + remove_dotnet: true + remove_haskell: true + remove_tool_cache: true + remove_swap: true + remove_packages: "azure-cli google-cloud-cli microsoft-edge-stable google-chrome-stable firefox postgresql* temurin-* *llvm* mysql* dotnet-sdk-*" + remove_packages_one_command: true + remove_folders: "/usr/share/swift /usr/share/miniconda /usr/share/az* /usr/local/lib/node_modules /usr/local/share/chromium /usr/local/share/powershell /usr/local/julia /usr/local/aws-cli /usr/local/aws-sam-cli /usr/share/gradle" + rm_cmd: "rmz" + rmz_version: "3.1.1" + + # Setup Rust toolchain and restore cached dependencies + - name: "EXEC: {Setup Rust toolchain}, independent" uses: actions-rust-lang/setup-rust-toolchain@v1 - - name: Build - run: cargo build - - - name: Set up Docker Buildx + # CACHE-RUST: Rust dependencies and build artifacts + # These are keyed on the Cargo.lock file to ensure cache validity + - name: "CACHE_RESTORE: {C-rust-cache}" + id: cache-rust + uses: actions/cache/restore@v4 + with: + path: | + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-rust-build-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-rust-build- + + # Setup Docker for building and running containers + - name: "EXEC: {Setup Docker}, independent" uses: docker/setup-buildx-action@v3 - - name: Build Docker builder image - run: docker build -t foc-localnet-builder ./docker - - - name: Verify Docker image was built - run: docker images foc-localnet-builder - - - name: Test build command with invalid path (should fail gracefully) + - name: "EXEC: {Install build dependencies}, independent" run: | - if ./target/debug/foc-localnet build lotus /nonexistent/path 2>&1; then - echo "Expected command to fail with nonexistent path" - exit 1 - fi + sudo 
apt-get update + sudo apt-get install -y tar openssl pkg-config libssl-dev - - name: Run Lotus build integration tests - run: | - RUST_BACKTRACE=full cargo test --test build_lotus_test -- --nocapture + # Build the foc-devnet binary + - name: "EXEC: {Build foc-devnet binary}, DEP: {C-rust-cache}" + run: cargo build --release - - name: Run Curio build integration tests + # CACHE-RUST: Save Rust build cache for future runs + - name: "CACHE_SAVE: {C-rust-cache}" + if: steps.cache-rust.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-rust-build-${{ hashFiles('**/Cargo.lock') }} + + # Copy binary and clean up Rust artifacts to save disk space + - name: "EXEC: {Copy binary and clean cache}, DEP: {C-rust-cache}" run: | - RUST_BACKTRACE=full cargo test --test build_curio_test -- --nocapture - - integration-start-test: - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - uses: actions/checkout@v4 - - - name: Setup Rust toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + cp ./target/release/foc-devnet ./foc-devnet + rm -rf ~/.cargo/registry/ + rm -rf ~/.cargo/git/db/ + rm -rf target/ + df -h + + # Compute cache keys based on version info and source files + # - CODE_HASH: Changes when Lotus/Curio versions change (for build artifacts cache) + # - DOCKER_HASH: Changes when Dockerfiles change (for Docker images cache) + - name: "CHECK: {Compute version hashes}" + id: version-hashes + run: | + # Get version output + VERSION_OUTPUT=$(./foc-devnet version 2>&1) + + # Compute CODE_HASH from all default:code: lines (Lotus/Curio versions) + CODE_HASH=$(echo "$VERSION_OUTPUT" | grep 'default:code:' | sha256sum | cut -d' ' -f1) + echo "code-hash=$CODE_HASH" >> $GITHUB_OUTPUT + echo "CODE_HASH: $CODE_HASH" + + # Compute DOCKER_HASH from docker/ directory (Dockerfile changes) + DOCKER_HASH=$(find docker -type f -exec sha256sum {} \; | sort | sha256sum | cut -d' ' -f1) + echo "docker-hash=$DOCKER_HASH" >> $GITHUB_OUTPUT + echo "DOCKER_HASH: $DOCKER_HASH" + + # CACHE-DOCKER: Try to restore pre-built Docker images (foc-lotus, foc-lotus-miner, foc-builder, foc-curio, foc-yugabyte) + # These images contain YugabyteDB and all build dependencies + - name: "CACHE_RESTORE: {C-docker-images-cache}" + id: cache-docker-images + uses: actions/cache/restore@v4 + with: + path: ~/.docker-images-cache + key: ${{ runner.os }}-docker-images-${{ steps.version-hashes.outputs.docker-hash }} - - name: Install build dependencies + # CACHE-DOCKER: If Docker images are cached, load them from tarballs + - name: "EXEC: {Load Docker images}, DEP: {C-docker-images-cache}" + if: steps.cache-docker-images.outputs.cache-hit == 'true' run: | - sudo apt-get update - sudo apt-get install -y tar - - - name: Generate build artifacts + echo "Loading Docker images from cache..." + for image in ~/.docker-images-cache/*.tar; do + if [ -f "$image" ]; then + echo "Loading $(basename $image)..." 
+ docker load -i "$image" + fi + done + echo "Docker images loaded successfully, list:" + docker images + rm -rf ~/.docker-images-cache + df -h + + # If Docker images are cached, skip building them AND skip downloading YugabyteDB + # (YugabyteDB is already baked into the foc-yugabyte Docker image) + - name: "EXEC: {Initialize with cached Docker}, DEP: {C-docker-images-cache}" + if: steps.cache-docker-images.outputs.cache-hit == 'true' run: | - # Ensure artifacts directory exists - mkdir -p artifacts - - # Generate MockUSDFC.tar.gz if contracts directory exists - if [ -d "contracts/MockUSDFC" ]; then - tar -czf artifacts/MockUSDFC.tar.gz contracts/MockUSDFC - echo "✅ Generated MockUSDFC.tar.gz" - ls -la artifacts/ - else - echo "❌ contracts/MockUSDFC directory not found" - exit 1 - fi + rm -rf ~/.foc-devnet + ./foc-devnet init --no-docker-build - - name: Build - run: cargo build + # If Docker images are not cached, do full init (downloads YugabyteDB and builds all images) + - name: "EXEC: {Initialize without cache}, independent" + if: steps.cache-docker-images.outputs.cache-hit != 'true' + run: | + rm -rf ~/.foc-devnet + ./foc-devnet init + + # CACHE-DOCKER: Build Docker images if not cached + - name: "EXEC: {Build Docker images}, DEP: {C-docker-images-cache}" + if: steps.cache-docker-images.outputs.cache-hit != 'true' + run: |- + mkdir -p ~/.docker-images-cache + echo "Building Docker images for cache..." + docker save foc-lotus -o ~/.docker-images-cache/foc-lotus.tar + docker save foc-lotus-miner -o ~/.docker-images-cache/foc-lotus-miner.tar + docker save foc-builder -o ~/.docker-images-cache/foc-builder.tar + docker save foc-curio -o ~/.docker-images-cache/foc-curio.tar + docker save foc-yugabyte -o ~/.docker-images-cache/foc-yugabyte.tar + echo "Docker images saved to cache" + ls -lath ~/.docker-images-cache/ + df -h + + # CACHE-DOCKER: Save Docker images cache for future runs + - name: "CACHE_SAVE: {C-docker-images-cache}" + if: steps.cache-docker-images.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: ~/.docker-images-cache + key: ${{ runner.os }}-docker-images-${{ steps.version-hashes.outputs.docker-hash }} - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + # CACHE-BINARIES: Try to restore previously built Lotus/Curio binaries + - name: "CACHE_RESTORE: {C-build-artifacts-cache}" + id: cache-binaries + uses: actions/cache/restore@v4 + with: + path: ~/.foc-devnet/bin + key: ${{ runner.os }}-binaries-${{ steps.version-hashes.outputs.code-hash }} + + - name: "EXEC: {Ensure permissions on binaries}, DEP: {C-build-artifacts-cache}" + if: steps.cache-binaries.outputs.cache-hit == 'true' + run: sudo chown -R $USER:$USER ~/.foc-devnet/bin/ + + # CACHE-GO: Try to restore foc-builder Go module cache to speed up Lotus/Curio builds + - name: "CACHE_RESTORE: {C-foc-builder-cache}" + id: cache-go + if: steps.cache-binaries.outputs.cache-hit != 'true' + uses: actions/cache/restore@v4 + with: + path: ~/.foc-devnet/docker/volumes/cache/foc-builder + key: ${{ runner.os }}-foc-builder-cache-${{ hashFiles('docker/**') }}-${{ hashFiles('src/config.rs') }} + restore-keys: | + ${{ runner.os }}-foc-builder-cache- + + - name: "EXEC: {Ensure permissions}, DEP: {C-foc-builder-cache}" + if: steps.cache-binaries.outputs.cache-hit != 'true' && + steps.cache-go.outputs.cache-hit == 'true' + run: sudo chown -R $USER:$USER ~/.foc-devnet/ + + - name: "EXEC: {Check disk space}, independent" + run: df -h + + # Build Lotus and Curio if not cached + - name: "EXEC: {Build 
Lotus}, DEP: {C-build-artifacts-cache}" + if: steps.cache-binaries.outputs.cache-hit != 'true' + run: ./foc-devnet build lotus + + - name: "EXEC: {Build Curio}, DEP: {C-build-artifacts-cache}" + if: steps.cache-binaries.outputs.cache-hit != 'true' + run: ./foc-devnet build curio + + # CACHE-GO: Save Go module cache for future builds + - name: "CACHE_SAVE: {C-foc-builder-cache}" + if: steps.cache-binaries.outputs.cache-hit != 'true' && + steps.cache-go.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: ~/.foc-devnet/docker/volumes/cache/foc-builder + key: ${{ runner.os }}-foc-builder-cache-${{ hashFiles('docker/**') }}-${{ hashFiles('src/config.rs') }} - - name: Enable Docker experimental features - run: | - echo '{"experimental": true}' | sudo tee /etc/docker/daemon.json - sudo systemctl restart docker + # CACHE-BINARIES: Save built Lotus/Curio binaries for future runs + - name: "CACHE_SAVE: {C-build-artifacts-cache}" + if: steps.cache-binaries.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: ~/.foc-devnet/bin + key: ${{ runner.os }}-binaries-${{ steps.version-hashes.outputs.code-hash }} - - name: Initialize foc-localnet environment + # Disk free-up + - name: "EXEC: {Clean up Go modules}, DEP: {C-build-artifacts-cache}" run: | - ./target/debug/foc-localnet init - timeout-minutes: 10 + sudo rm -rf ~/.foc-devnet/docker/volumes/cache + sudo rm -rf ~/.foc-devnet/code/lotus + sudo rm -rf ~/.foc-devnet/code/curio + df -h - - name: Build curio + # Download and extract Filecoin proof parameters from S3 + - name: "EXEC: {Download proof parameters from S3}, independent" run: | - ./target/debug/foc-localnet build curio - timeout-minutes: 10 - - - name: Build lotus + mkdir -p ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/ + curl -L https://fil-proof-params-2k-cache.s3.us-east-2.amazonaws.com/filecoin-proof-params-2k.tar -o /tmp/filecoin-proof-params-2k.tar + tar -xf /tmp/filecoin-proof-params-2k.tar -C ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/ + rm /tmp/filecoin-proof-params-2k.tar + ls -lath ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters/ + PROOF_PARAMS_HASH=$(find ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters -type f -exec sha256sum {} \; | cut -d' ' -f1 | sort | sha256sum | cut -d' ' -f1) + echo "Downloaded proof parameters with hash: $PROOF_PARAMS_HASH" + + # Verify cluster is running correctly + - name: "EXEC: {Check cluster status}, independent" + run: ./foc-devnet status + + # Start the full Filecoin localnet cluster + - name: "EXEC: {Start cluster}, independent" + id: start_cluster + continue-on-error: true + run: ./foc-devnet start --parallel + + # On failure, collect and print Docker container logs for debugging + - name: "EXEC: {Collect Docker logs on failure}, independent" run: | - ./target/debug/foc-localnet build lotus - timeout-minutes: 10 + echo "+++++++++++ foc-devnet version run" + cat ~/.foc-devnet/state/latest/version.txt 2>/dev/null || echo "No version file found" - - name: Test start command with --reset --regenesis - run: | - ./target/debug/foc-localnet start --reset --regenesis - timeout-minutes: 15 + echo "+++++++++++ Listing runs in foc-devnet..." 
+ ls -1 ~/.foc-devnet/run/ 2>/dev/null || echo "No runs directory found" - - name: Verify cluster is running - run: | - # Check that containers are running - running_containers=$(docker ps --filter "name=foc-" --format "{{.Names}}") - echo "Running containers: $running_containers" - - # Should have at least lotus and lotus-miner running - if ! echo "$running_containers" | grep -q "foc-lotus"; then - echo "ERROR: foc-lotus container is not running" - exit 1 + echo "+++++++++++ Get latest run ID" + LATEST_RUN=$(ls -t ~/.foc-devnet/run/ 2>/dev/null | head -1) + if [ -n "$LATEST_RUN" ]; then + echo "Latest run: $LATEST_RUN" + RUN_DIR="$HOME/.foc-devnet/run/$LATEST_RUN" + else + RUN_DIR="$HOME/.foc-devnet/state/latest" fi - if ! echo "$running_containers" | grep -q "foc-lotus-miner"; then - echo "ERROR: foc-lotus-miner container is not running" - exit 1 - fi + echo "+++++++++++ Disk space..." + sudo df -h 2>/dev/null || echo "df command failed" - echo "✅ Cluster containers are running" + echo "+++++++++++ Latest Run Directory" + ls -lath "$RUN_DIR" 2>/dev/null || echo "No run directory found at $RUN_DIR" + ls -lath "$RUN_DIR/logs" 2>/dev/null || echo "No logs directory found at $RUN_DIR/logs" - - name: Check contract deployment - run: | - # Wait a bit for contracts to be deployed - sleep 10 + echo "+++++++++++ Contract Addresses" + cat "$RUN_DIR/contract_addresses.json" 2>/dev/null || echo "No contract addresses file found" + + echo "+++++++++++ Step Context" + cat "$RUN_DIR/step_context.json" 2>/dev/null || echo "No step context file found" - # Check if contract addresses file exists - if [ -f ~/.foc-localnet/artifacts/docker/volumes/foc-contract-addresses.json ]; then - echo "✅ Contract addresses file exists" - cat ~/.foc-localnet/artifacts/docker/volumes/foc-contract-addresses.json + echo "+++++++++++ FOC metadata" + cat "$RUN_DIR/foc_metadata.json" 2>/dev/null || echo "No foc metadata file found" + + echo "+++++++++++ Container Logs" + if [ -d "$RUN_DIR/logs" ]; then + for logfile in "$RUN_DIR/logs"/*; do + if [ -f "$logfile" ]; then + echo "" + echo "📰📰📰📰📰📰📰📰📰📰📰 Logs from $(basename "$logfile") 📰📰📰📰📰📰📰📰📰📰📰📰📰" + cat "$logfile" 2>/dev/null || echo "Failed to read $logfile" + echo "📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰📰" + fi + done else - echo "❌ Contract addresses file not found" - exit 1 + echo "No container logs directory found at $RUN_DIR/logs" fi - - name: Cleanup - Stop cluster + # Verify cluster is running correctly + - name: "EXEC: {Check cluster status}, independent" + run: ./foc-devnet status + + - name: "EXEC: {List foc-* containers}, independent" run: | - ./target/debug/foc-localnet stop - if: always() + echo "Containers using foc-* images (running or exited):" + docker ps -a --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}' + + # Clean shutdown + - name: "EXEC: {Stop cluster}, independent" + run: ./foc-devnet stop - - name: Cleanup - Remove containers and volumes + # Mark job as failed if the start step failed, but only after all steps + - name: "CHECK: {Fail job if start failed}" + if: ${{ always() && steps.start_cluster.outcome == 'failure' }} run: | - docker system prune -f --volumes - if: always() + echo "Start cluster failed earlier; marking job as failed." >&2 + exit 1 diff --git a/ADVANCED_README.md b/ADVANCED_README.md new file mode 100644 index 0000000..5d6462d --- /dev/null +++ b/ADVANCED_README.md @@ -0,0 +1,1221 @@ +# Advanced Guide: foc-devnet + +This guide covers advanced usage, internal architecture, and operational details of foc-devnet. 
+ +--- + +## Commands Reference + +### `init` +Initializes foc-devnet by downloading repositories, building Docker images, and preparing the environment. + +```bash +foc-devnet init [OPTIONS] +``` + +**Options:** +- `--curio ` - Curio source location +- `--lotus ` - Lotus source location +- `--filecoin-services ` - Filecoin Services source location +- `--synapse-sdk ` - Synapse SDK source location +- `--yugabyte-url ` - Yugabyte download URL +- `--yugabyte-archive ` - Local Yugabyte archive file +- `--proof-params-dir ` - Local proof params directory +- `--force` - Force regeneration of config file +- `--rand` - Use random mnemonic instead of deterministic one + +**Source Format:** +- `gittag:v1.0.0` - Specific git tag (uses default repo) +- `gittag:https://github.com/user/repo.git:v1.0.0` - Tag from custom repo +- `gitcommit:abc123` - Specific git commit +- `gitbranch:main` - Specific git branch +- `local:/path/to/repo` - Local directory + +**Example:** +```bash +foc-devnet init \ + --lotus local:/home/user/lotus \ + --curio gitbranch:pdpv0 \ + --force +``` + +### `build` +Builds Filecoin components in Docker containers. + +```bash +foc-devnet build lotus [PATH] [--output-dir ] +foc-devnet build curio [PATH] [--output-dir ] +``` + +**Example:** +```bash +foc-devnet build lotus +foc-devnet build curio /path/to/custom/curio --output-dir ~/bins +``` + +### `start` +Starts the local Filecoin network cluster. + +```bash +foc-devnet start [OPTIONS] +``` + +**Options:** +- `--volumes-dir ` - Custom docker volumes directory +- `--run-dir ` - Custom run-specific data directory +- `--parallel` - **⚡ Run steps in parallel for ~40% faster startup (recommended)** +- `--notest` - Skip end-to-end tests + +**Recommended for faster startup:** +```bash +foc-devnet start --parallel +``` + +**Skip tests during development:** +```bash +foc-devnet start --parallel --notest +``` + +> **💡 Pro Tip:** Use `--parallel` by default! It runs independent steps concurrently (contract deployments, database startup, etc.) while respecting dependencies. This can reduce startup time from ~5 minutes to ~3 minutes. + +**After successful start:** +- Portainer UI available at http://localhost:5700 (uses first port in configured range) +- Use Portainer to monitor containers, view logs, and debug issues +- All container names include the run ID for easy identification + +### `stop` +Stops all running containers and cleans up Docker networks. + +```bash +foc-devnet stop +``` + +**What it does:** +- Stops containers in reverse order (Curio → Yugabyte → Lotus-Miner → Lotus) +- Removes containers to ensure clean state +- Deletes Docker networks +- Preserves Portainer for persistent access +- Clears run ID + +### `status` +Shows the current status of the foc-devnet system. + +```bash +foc-devnet status +``` + +Displays: +- Current run ID +- Container states +- Network information +- Port allocations + +### `version` +Shows version information. + +```bash +foc-devnet version +``` + +--- + +## Configuration System + +### Config File Location + +``` +~/.foc-devnet/config.toml +``` + +### Config Structure + +```toml +# Port range for dynamic allocation +# foc-devnet uses a contiguous range of ports to avoid conflicts with other +# services on your machine. All components (Lotus, Curio SPs, Yugabyte, etc.) +# dynamically allocate ports from this range. Using a dedicated range ensures: +# - No conflicts with system services (MySQL, PostgreSQL, etc.) 
+# - Easy firewall configuration (just open one range) +# - Port availability can be validated before starting +port_range_start = 5700 +port_range_count = 100 + +# Service Provider configuration +approved_pdp_sp_count = 1 # SPs registered and approved in registry +active_pdp_sp_count = 1 # Total SPs actually running + +# Yugabyte database +yugabyte_download_url = "https://software.yugabyte.com/releases/2.25.1.0/..." + +# Component sources +[lotus] +url = "https://github.com/filecoin-project/lotus.git" +tag = "v1.34.0" + +[curio] +url = "https://github.com/filecoin-project/curio.git" +branch = "pdpv0" + +[filecoin_services] +url = "https://github.com/FilOzone/filecoin-services.git" +tag = "v1.0.0" + +[multicall3] +url = "https://github.com/mds1/multicall3.git" +branch = "main" + +[synapse_sdk] +url = "git@github.com:FilOzone/synapse-sdk.git" +tag = "synapse-sdk-v0.36.1" +``` + +### Configuration Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `port_range_start` | u16 | 5700 | Starting port for contiguous port range | +| `port_range_count` | u16 | 100 | Number of ports in the range | +| `approved_pdp_sp_count` | usize | 1 | Number of approved service providers | +| `active_pdp_sp_count` | usize | 1 | Number of running service providers | +| `yugabyte_download_url` | string | (URL) | Yugabyte database tarball URL | + +**Constraints:** +- `approved_pdp_sp_count` ≤ `active_pdp_sp_count` ≤ `MAX_PDP_SP_COUNT` (5) + +### Editing Config + +```bash +# Edit manually +vim ~/.foc-devnet/config.toml + +# Or use init --force to regenerate +foc-devnet init --force +``` + +--- + +## Directory Structure + +``` +~/.foc-devnet/ +├── config.toml # Main configuration file +├── bin/ # Compiled binaries (lotus, curio) +├── code/ # Cloned repositories, or symlinks +│ ├── lotus/ # Lotus source code +│ ├── curio/ # Curio source code +│ ├── filecoin-services/ # FOC smart contracts +│ ├── multicall3/ # Multicall3 contracts +│ └── synapse-sdk/ # Synapse SDK +├── docker/ +│ └── volumes/ +│ ├── cache/ # Shared cache (proof params, etc.) +│ │ └── filecoin-proof-parameters/ +│ └── run-specific/ # Run-isolated volumes +│ └── / # Each run has its own volumes +│ ├── lotus-data/ # Lotus blockchain data +│ ├── lotus-miner-data/ +│ ├── yugabyte-data/ +│ ├── curio-1/ # First Curio SP +│ ├── curio-2/ # Second Curio SP (if active) +│ └── ... +├── keys/ # BLS keys (genesis/mnemonic) +│ ├── mnemonic.txt # Seed phrase +│ └── genesis/ # Genesis block keys +├── logs/ # Container logs +├── run/ # Run-specific execution data +│ └── / # e.g., 26jan02-1430_ZanyPip/ +│ ├── setup.log # Startup execution log +│ ├── version.txt # Component versions +│ ├── contract_addresses.json # Deployed contracts +│ ├── step_context.json # Step state (addresses, etc.) +│ ├── foc_metadata.json # FOC service metadata +│ └── pdp_sps/ +│ ├── 1.provider_id.json # First SP provider ID +│ ├── 2.provider_id.json # Second SP provider ID +│ └── ... +├── state/ # Global state +│ ├── current_run_id.txt # Current active run +│ └── latest -> ../run// # Symlink to latest run +└── tmp/ # Temporary files +``` + +### Key Files + +**`contract_addresses.json`** - Deployed smart contract addresses: +```json +{ + "MockUSDFC": "0x1234...", + "Multicall3": "0x5678...", + "PDPVerifier": "0x9abc...", + "ServiceProviderRegistry": "0xdef0...", + "FilecoinWarmStorageService": "0x1122..." 
+} +``` + +**`step_context.json`** - Shared state between steps, useful for figuring out what happened, what commands were run: +```json +{ + "deployer_mockusdfc_eth_address": "0xabcd...", + "deployer_foc_eth_address": "0xef01...", + "mockusdfc_contract_address": "0x1234...", + "foc_lot_api_addr": "/ip4/127.0.0.1/tcp/1234/http", + "pdp_1_provider_id": "f01234" +} +``` + +**`foc_metadata.json`** - FOC service configuration: +```json +{ + "service_name": "FOC DevNet Warm Storage", + "service_description": "Warm storage service...", + "mockusdfc_address": "0x1234...", + "warm_storage_service_address": "0x5678..." +} +``` + +--- + +## Resetting the System + +### Normal Start Behavior + +**What happens on `start`:** +- Stops any running containers from previous runs +- Creates a NEW run with a unique run ID +- Previous run data is **preserved** for historical reference and debugging +- Each run is completely isolated by its run ID + +```bash +foc-devnet start # Creates new run, preserves old ones +``` + +**Why preserve old runs?** +- **Debugging:** Compare logs and state between runs +- **Historical reference:** Track what happened in previous tests +- **No conflicts:** Run IDs ensure complete isolation +- **Disk management:** You control cleanup manually + +### Manual Cleanup + +**Delete specific old run:** +```bash +# Stop cluster first +foc-devnet stop + +# Delete specific run by run ID +rm -rf ~/.foc-devnet/run/26jan01-1200_OldRun +rm -rf ~/.foc-devnet/docker/volumes/run-specific/26jan01-1200_OldRun +``` + +**Delete all old runs (keep only current):** +```bash +# Stop cluster +foc-devnet stop + +# Find current run ID +CURRENT_RUN=$(cat ~/.foc-devnet/state/current_run_id.txt) + +# Delete all runs except current +cd ~/.foc-devnet/run +ls | grep --invert-match "$CURRENT_RUN" | xargs rm -rf + +cd ~/.foc-devnet/docker/volumes/run-specific +ls | grep --invert-match "$CURRENT_RUN" | xargs rm -rf +``` + +**Complete nuclear reset (delete EVERYTHING including config):** +```bash +# This deletes all runs, config, repos, binaries, keys - use with caution! +rm -rf ~/.foc-devnet +``` + +### Manual Cleanup + +```bash +# Stop cluster +foc-devnet stop + +# Delete specific run +rm -rf ~/.foc-devnet/run/26jan02-1430_ZanyPip +rm -rf ~/.foc-devnet/docker/volumes/run-specific/26jan02-1430_ZanyPip + +# Complete nuclear reset (delete everything) +rm -rf ~/.foc-devnet +``` + +--- + +## Run ID and Step Context + +### Run ID + +**What:** A unique identifier for each cluster execution. + +**Format:** `YYmmmDD-HHMM_RandomName` + +**Example:** `26jan02-1430_ZanyPip` + +**Why needed:** +- **Isolation:** Separate concurrent runs without conflicts +- **Debugging:** Identify logs and data for specific executions +- **Reproducibility:** Track exactly which run produced which results +- **Volume separation:** Each run has its own Docker volumes + +**Generation:** +```rust +// Date: YYmmmDD (26jan02 = January 2, 2026) +// Time: HHMM (1430 = 2:30 PM) +// Name: RandomAdjective + RandomNoun (ZanyPip) +"26jan02-1430_ZanyPip" +``` + +**Storage:** +- Current run: `~/.foc-devnet/state/current_run_id.txt` +- Latest symlink: `~/.foc-devnet/state/latest` → `../run//` + +### Step Context (SetupContext) + +**What:** Thread-safe shared state container that passes data between steps. 
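+
+Because the context is persisted to `step_context.json` in the run directory, it can also be inspected from the shell after a run. A minimal sketch, assuming `jq` is installed and the flat string-to-string layout shown above:
+
+```bash
+# Step context of the most recent run, via the state/latest symlink
+CTX=~/.foc-devnet/state/latest/step_context.json
+
+# List every key recorded so far
+jq -r 'keys[]' "$CTX"
+
+# Pull a single value, e.g. the MockUSDFC contract address
+jq -r '.mockusdfc_contract_address' "$CTX"
+```
+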
+ +**Why needed:** +- **Dependency resolution:** Later steps need data from earlier steps +- **Decoupling:** Steps don't directly call each other +- **Parallelization:** Thread-safe for concurrent step execution +- **State persistence:** Automatically saved to `step_context.json` + +**Architecture:** +```rust +pub struct SetupContext { + state: Arc>>, // Shared state + run_id: String, // Current run ID + run_dir: PathBuf, // Run directory + port_allocator: Arc>, // Port manager +} +``` + +**Example flow:** + +```rust +// Step 1: ETHAccFundingStep creates deployer address +fn execute(&self, context: &SetupContext) -> Result<(), Box> { + let address = create_eth_address()?; + context.set("deployer_mockusdfc_eth_address", &address); + Ok(()) +} + +// Step 2: USDFCDeployStep uses that address +fn execute(&self, context: &SetupContext) -> Result<(), Box> { + let deployer = context + .get("deployer_mockusdfc_eth_address") + .ok_or("Deployer not found")?; + let contract = deploy_mockusdfc(&deployer)?; + context.set("mockusdfc_contract_address", &contract); + Ok(()) +} + +// Step 3: USDFCFundingStep uses the contract address +fn execute(&self, context: &SetupContext) -> Result<(), Box> { + let contract = context.get("mockusdfc_contract_address")?; + fund_accounts(&contract)?; + Ok(()) +} +``` + +**Common context keys:** +- `deployer_mockusdfc_eth_address` - MockUSDFC deployer address +- `deployer_foc_eth_address` - FOC contracts deployer address +- `mockusdfc_contract_address` - MockUSDFC token contract +- `multicall3_contract_address` - Multicall3 contract +- `foc_lot_api_addr` - Lotus API multiaddr +- `pdp_1_provider_id` - First Curio SP provider ID +- `pdp_2_provider_id` - Second Curio SP provider ID (if active) + +--- + +## Docker and Networking + +### Why Docker? + +**Isolation:** Each component runs in its own container with controlled dependencies. + +**Reproducibility:** Same environment on every machine (Linux, macOS, Windows with WSL2). + +**Lightweight:** Only Docker needed on host; all other dependencies containerized. + +**Build isolation:** Rust, Go, Node.js toolchains stay inside containers. + +### Portainer: Your Debugging Companion + +**What is Portainer?** + +Portainer is a lightweight container management UI that gives you visual, browser-based access to all your Docker containers, networks, and volumes. foc-devnet automatically starts Portainer using the first port in your configured range. + +**Access:** http://localhost:5700 (default, or first port from `port_range_start` in config.toml) + +**Why Portainer is essential for debugging:** + +1. **Real-time Container Monitoring:** + - See which containers are running/stopped at a glance + - Monitor CPU/memory usage per container + - Quickly identify crashed or unhealthy containers + +2. **Live Log Streaming:** + - View logs from any container in real-time + - Search and filter log output + - Compare logs across multiple containers simultaneously + - No need to remember `docker logs` commands + +3. **Container Inspection:** + - View environment variables + - Check mounted volumes and their contents + - Inspect network connections + - See container configuration and restart policies + +4. **Interactive Shell Access:** + - Open bash/sh sessions directly in containers + - Execute commands without using `docker exec` + - Useful for inspecting files, running one-off commands + +5. 
**Network Visualization:** + - See which containers are on which networks + - Understand connectivity between components + - Troubleshoot network isolation issues + +6. **Quick Actions:** + - Restart individual containers without stopping the whole cluster + - Start/stop specific components for testing + - Delete and recreate containers quickly + +**Common debugging workflows with Portainer:** + +```bash +# 1. Check why Lotus isn't responding +# → Open Portainer → Containers → foc--lotus → Logs +# → Look for "API server listening" or error messages + +# 2. Inspect contract deployment failure +# → Containers → foc-builder → Logs +# → Search for "Error" or "failed" + +# 3. Debug Curio SP not registering +# → Containers → foc--curio-1 → Console +# → Run: curio info (to check status) + +# 4. Check database connectivity +# → Containers → foc--yugabyte → Stats +# → Verify it's running and consuming resources +``` + +**Pro Tips:** +- **Log timestamps:** Portainer shows exact timestamps, helpful for debugging race conditions +- **Multiple tabs:** Open logs from different containers side-by-side for correlation +- **Persistent:** Portainer survives across runs, so you can check old run logs + +### Container Architecture + +| Container | Image | Purpose | Ports | +|-----------|-------|---------|-------| +| `foc--lotus` | foc-lotus | Filecoin daemon (FEVM enabled) | 1234 (API), 1235 (P2P) | +| `foc--lotus-miner` | foc-lotus-miner | First-gen miner (PoRep) | 2345 (API) | +| `foc--yugabyte` | foc-yugabyte | Database for Curio | 5433 (PostgreSQL) | +| `foc--curio-1` | foc-curio | First Curio SP (PDP) | Dynamic | +| `foc--curio-2` | foc-curio | Second Curio SP (PDP) | Dynamic | +| `foc--curio-N` | foc-curio | Nth Curio SP (PDP) | Dynamic | +| `foc-builder` | foc-builder | Foundry tools (contract deployment) | Host network | +| `foc-portainer` | portainer/portainer-ce | Container management UI | 5700 (first from range) | + +**Note:** Container names include run-id for isolation (e.g., `foc-26jan02-1430_ZanyPip-lotus`). + +### Network Topology + +foc-devnet uses **user-defined bridge networks** to separate components: + +**What are user-defined bridge networks?** + +Docker's user-defined bridge networks are virtual networks that provide: +- **Container isolation:** Containers on different networks can't communicate directly +- **Automatic DNS:** Containers can reference each other by name (e.g., `foc-lotus` instead of IP addresses) +- **Network segmentation:** Mimics real-world network separation for testing + +**Important:** All containers are still accessible from the host machine via their exposed ports. The networks only control container-to-container communication and provide convenient DNS resolution. This segregation helps: +- **Test network isolation scenarios:** Simulate how components interact in production +- **Prevent accidental cross-talk:** Ensure services only communicate with intended peers +- **Enable clean DNS:** Use container names instead of hardcoded IPs in configuration + +**Network diagram:** + +```mermaid +graph TB + subgraph host["Host Machine (localhost)"] + style host fill:#f0f0f0,stroke:#333,stroke-width:2px + portainer["🌐 Portainer
:5700"] + lotus_api["📡 Lotus API
:5701"] + miner_api["⛏️ Miner API
:5702"] + yugabyte_api["🗄️ Yugabyte
:5710"] + end + + subgraph lotus_net["foc-<run-id>-lot-net
(Lotus Network - Blockchain Communication)"] + style lotus_net fill:#e3f2fd,stroke:#1976d2,stroke-width:2px + lotus["foc-lotus
(Filecoin Daemon)"] + builder["foc-builder
(--net=host)"] + curio1_lot["foc-curio-1
(on lot-net)"] + end + + subgraph miner_net["foc-<run-id>-lot-m-net
(Lotus Miner Network)"] + style miner_net fill:#fff3e0,stroke:#f57c00,stroke-width:2px + miner["foc-lotus-miner
(PoRep Miner)"] + end + + subgraph curio_net["foc-<run-id>-cur-m-net-1
(Curio SP 1 Network)"] + style curio_net fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px + yugabyte["foc-yugabyte
(Database)"] + curio1["foc-curio-1
(PDP Service Provider)"] + end + + %% Container to Host connections + lotus -.->|exposes| lotus_api + miner -.->|exposes| miner_api + yugabyte -.->|exposes| yugabyte_api + + %% Network connections + builder -->|uses host network| lotus + curio1 -->|same container| curio1_lot + miner -->|connects to| lotus + curio1_lot -->|connects to| lotus + yugabyte <-->|database| curio1 + + %% Styling + classDef container fill:#fff,stroke:#333,stroke-width:1px + class lotus,builder,curio1_lot,miner,yugabyte,curio1 container +``` + +**Legend:** +- **Solid lines** → Network connectivity +- **Dotted lines** → Port exposure to host +- **Boxes** → Docker networks (segregation boundaries) +- All services remain accessible from host machine despite network isolation + +**Why multiple networks (segregation purposes):** + +1. **Lotus Network (`foc--lot-net`)**: + - All components that need Lotus API access + - Provides DNS: containers can use `foc--lotus` as hostname + +2. **Lotus Miner Network (`foc--lot-m-net`)**: + - Lotus miner's isolated network + - Miner connects to Lotus daemon by name + +3. **Curio Networks (`foc--cur-m-net-N`)**: + - Each Curio SP gets its own network + - All share Yugabyte database via network membership + - Provides DNS: Curio can use `foc--yugabyte` as database host + +**Builder uses host network** (`--network host`) to access Lotus RPC at `http://localhost:1234/rpc/v1`. + +**Access from host machine:** + +Despite network segregation, you can still access all services from your host: +- Lotus API: `http://localhost:1234/rpc/v1` +- Lotus Miner API: `http://localhost:2345` +- Yugabyte Database: `postgresql://localhost:5433` +- Portainer UI: `http://localhost:5700` +- Curio instances: Dynamic ports (check `docker ps`) + +The networks only affect container-to-container communication, not host-to-container access. + +### Port Management + +**Dynamic allocation:** Ports allocated from configured range (default: 5700-5799). + +**Port Allocator:** Thread-safe sequential port assignment. + +**Port allocation order:** +1. **First port (5700):** Portainer web UI - always uses `port_range_start` +2. **Remaining ports:** Dynamically assigned to Curio instances, Yugabyte, and other services as needed + +```bash +# Configure in config.toml +port_range_start = 5700 +port_range_count = 100 +``` + +--- + +## Repository Management + +### Required Repositories + +| Repository | Default Source | Purpose | +|------------|---------------|---------| +| **lotus** | `github.com/filecoin-project/lotus:v1.34.0` | Filecoin daemon | +| **curio** | `github.com/filecoin-project/curio:pdpv0` | Storage provider (PDP) | +| **filecoin-services** | `github.com/FilOzone/filecoin-services:v1.0.0` | FOC smart contracts | +| **multicall3** | `github.com/mds1/multicall3:main` | Multicall3 contract | +| **synapse-sdk** | `github.com/FilOzone/synapse-sdk:synapse-sdk-v0.36.1` | PDP verification SDK | + +### Using Local Repositories + +**For active development:** + +```bash +foc-devnet init \ + --lotus local:/home/user/dev/lotus \ + --curio local:/home/user/dev/curio \ + --filecoin-services local:/home/user/dev/filecoin-services \ + --synapse-sdk local:/home/user/dev/synapse-sdk \ + --force +``` + +**Mixed approach:** + +```bash +foc-devnet init \ + --lotus gitbranch:master \ + --curio local:/home/user/dev/curio \ + --force +``` + +### Sharing Configuration + +**To share your exact setup with others:** + +1. **Export config:** + ```bash + cat ~/.foc-devnet/config.toml + ``` + +2. 
**Document versions:** + ```toml + # Lotus v1.34.0 + [lotus] + url = "https://github.com/filecoin-project/lotus.git" + tag = "v1.34.0" + + # Curio pdpv0 branch (commit: abc123) + [curio] + url = "https://github.com/filecoin-project/curio.git" + branch = "pdpv0" + + # FilOzone services v1.0.0 + [filecoin_services] + url = "https://github.com/FilOzone/filecoin-services.git" + tag = "v1.0.0" + + # Synapse SDK + [synapse_sdk] + url = "git@github.com:FilOzone/synapse-sdk.git" + tag = "synapse-sdk-v0.36.1" + ``` + +3. **Share config file:** + ```bash + # Recipient copies config + mkdir -p ~/.foc-devnet + cp shared-config.toml ~/.foc-devnet/config.toml + + # Run init to download and build + foc-devnet init + ``` + +**For reproducible builds, specify exact commits:** + +```toml +[lotus] +url = "https://github.com/filecoin-project/lotus.git" +commit = "abc123def456..." + +[curio] +url = "https://github.com/filecoin-project/curio.git" +commit = "789012345678..." +``` + +--- + +## Command Flags + +### `init` Flags + +| Flag | Type | Description | +|------|------|-------------| +| `--curio` | String | Curio source location | +| `--lotus` | String | Lotus source location | +| `--filecoin-services` | String | Filecoin Services source location | +| `--synapse-sdk` | String | Synapse SDK source location | +| `--yugabyte-url` | String | Yugabyte download URL | +| `--yugabyte-archive` | Path | Local Yugabyte archive (.tar.gz) | +| `--proof-params-dir` | Path | Local proof parameters directory | +| `--force` | Boolean | Force config regeneration | +| `--rand` | Boolean | Use random mnemonic (non-deterministic keys) | + +**Why `--force`:** Regenerates `config.toml` even if it exists. Useful when switching between configurations. + +**Why `--rand`:** Generates random keys instead of deterministic ones. Use for unique test scenarios. + +### `build` Flags + +| Flag | Type | Description | +|------|------|-------------| +| `--output-dir` | Path | Directory for built binaries (default: `~/.foc-devnet/bin`) | + +**Why `--output-dir`:** Specify custom location for compiled binaries. + +### `start` Flags + +| Flag | Type | Description | +|------|------|-------------| +| `--volumes-dir` | Path | Custom docker volumes directory | +| `--run-dir` | Path | Custom run-specific data directory | +| `--parallel` | Boolean | ⚡ **Execute steps in parallel (~40% faster, recommended)** | +| `--notest` | Boolean | Skip end-to-end Synapse tests | + +**Why `--parallel` (Recommended):** +- **⚡ Significant speedup:** Reduces startup time from ~10 min to ~6 min +- **Smart parallelization:** Steps that don't depend on each other run concurrently +- **Production-ready:** Thread-safe implementation with proper synchronization +- **Use case:** Default for most workflows, especially development iteration + +**When NOT to use `--parallel`:** +- Debugging step ordering issues +- Very low-resource machines (< 4GB RAM) +- First-time setup (sequential is easier to follow) + +**Parallel execution epochs (with `--parallel`):** + +| Epoch | Steps | Parallelized? 
| Why | +|-------|-------|---------------|-----| +| 1 | Lotus | No | Foundational - everything depends on it | +| 2 | Lotus Miner | No | Needs Lotus running | +| 3 | ETH Account Funding | No | Needs blockchain active | +| 4 | MockUSDFC Deploy + Multicall3 Deploy | **⚡ YES** | Independent contract deployments | +| 5 | FOC Deploy + USDFC Funding + Yugabyte | **⚡ YES** | Parallel contract work + DB startup | +| 6 | Curio SPs | No | Needs Yugabyte ready | +| 7 | PDP SP Registration | No | Needs Curio running for ports | +| 8 | Synapse E2E Test | No | Verification step | + +**Time savings:** Epochs 4 and 5 run ~40% faster in parallel mode. + +**Without `--parallel`:** All 8 epochs run sequentially (~5 minutes total). +**With `--parallel`:** Epochs 4-5 run concurrently (~3 minutes total). + +**Why `--notest`:** Skip time-consuming E2E tests when rapid iteration needed. + +**Why `--volumes-dir` / `--run-dir`:** Use custom paths (e.g., faster SSD, network storage). + +--- + +## Lifecycle Overview + +### Full Lifecycle + +``` +┌──────────┐ +│ init │ Download repos, build images, generate keys +└────┬─────┘ + │ + ▼ +┌──────────┐ +│ build │ Compile lotus and curio binaries +└────┬─────┘ + │ + ▼ +┌──────────┐ +│ start │ Launch cluster (see detailed flow below) +└────┬─────┘ + │ + ▼ +┌──────────┐ +│ [running]│ Cluster active, contracts deployed +└────┬─────┘ + │ + ▼ +┌──────────┐ +│ stop │ Stop containers, cleanup networks +└────┬─────┘ + │ + ▼ +┌──────────┐ +│ start │ Regenesis + restart (fresh blockchain) +└──────────┘ +``` + +### Detailed Start Sequence + +**1. Pre-start cleanup:** + - Stop any existing cluster + - Generate unique run ID + - Create run directories + - Perform regenesis (delete old run volumes) + +**2. Genesis prerequisites (one-time per start):** + - Generate BLS keys for prefunded accounts + - Create pre-sealed sectors + - Build genesis block configuration + +**3. Port allocation:** + - Validate port range availability + - Allocate Portainer port + - Initialize port allocator for dynamic assignment + +**4. Network creation:** + - Create Lotus network + - Create Lotus Miner network + - Create Curio networks (one per SP) + +**5. Step execution (sequential or parallel):** + + **a. Lotus Step:** + - Start Lotus daemon container + - Wait for API file + - Verify RPC connectivity + + **b. Lotus Miner Step:** + - Import pre-sealed sectors + - Initialize miner + - Start mining + + **c. ETH Account Funding Step:** + - Transfer FIL to create FEVM addresses + - Fund deployer accounts + - Wait for address activation + + **d. MockUSDFC Deploy Step:** + - Deploy ERC-20 test token + - Save contract address + + **e. USDFC Funding Step:** + - Transfer tokens to test accounts + - Fund Curio SPs + + **f. Multicall3 Deploy Step:** + - Deploy Multicall3 contract + - Save contract address + + **g. FOC Deploy Step:** + - Deploy FOC service contracts + - Deploy PDPVerifier, ServiceProviderRegistry, etc. + - Save all contract addresses + + **h. Yugabyte Step:** + - Start Yugabyte database + - Verify PostgreSQL port + + **i. Curio Step:** + - Initialize Curio database schemas + - Start N Curio SP containers + - Configure PDP endpoints + + **j. PDP SP Registration Step:** + - Register each Curio SP in registry + - Approve authorized SPs + - Save provider IDs + + **k. Synapse E2E Test Step:** + - Run end-to-end verification + - Test deal flow (unless `--notest`) + +**6. 
Post-start:** + - Save step context + - Display summary + - Print access URLs + +### Step Implementation Pattern + +Every step follows this trait: + +```rust +pub trait Step: Send + Sync { + fn name(&self) -> &str; + fn pre_execute(&self, context: &SetupContext) -> Result<(), Box>; + fn execute(&self, context: &SetupContext) -> Result<(), Box>; + fn post_execute(&self, context: &SetupContext) -> Result<(), Box>; + fn run(&self, context: &SetupContext) -> Result>; +} +``` + +**Phases:** +1. **Pre-execute:** Validation (check images, ports, prerequisites) +2. **Execute:** Main work (start container, deploy contract, etc.) +3. **Post-execute:** Verification (check API, confirm deployment) + +--- + +## Service Provider Examples + +### Example 1: Run 1 SP with 0 Authorized + +**Scenario:** Testing unapproved SP behavior. + +**Configuration:** +```toml +# ~/.foc-devnet/config.toml +approved_pdp_sp_count = 0 +active_pdp_sp_count = 1 +``` + +**What happens:** +- 1 Curio SP starts (PDP_SP_1) +- SP registers in ServiceProviderRegistry +- SP is **not** approved (no authorization) +- SP cannot accept storage deals +- Useful for testing rejection flows + +**Steps:** +```bash +# Edit config +vim ~/.foc-devnet/config.toml +# Set: approved_pdp_sp_count = 0, active_pdp_sp_count = 1 + +# Start cluster +foc-devnet start --parallel + +# Verify +docker ps | grep curio +# Should see: foc--curio-1 + +# Check registration +cat ~/.foc-devnet/run//pdp_sps/1.provider_id.json +# SP exists but not in approved list +``` + +### Example 2: Run 3 SPs with Top 2 Authorized + +**Scenario:** Testing mixed authorization, failover scenarios. + +**Configuration:** +```toml +# ~/.foc-devnet/config.toml +approved_pdp_sp_count = 2 +active_pdp_sp_count = 3 +``` + +**What happens:** +- 3 Curio SPs start (PDP_SP_1, PDP_SP_2, PDP_SP_3) +- PDP_SP_1 and PDP_SP_2 are approved +- PDP_SP_3 registers but is **not** approved +- First 2 SPs can accept deals, third cannot +- Useful for testing authorization policies + +**Steps:** +```bash +# Edit config +vim ~/.foc-devnet/config.toml +# Set: approved_pdp_sp_count = 2, active_pdp_sp_count = 3 + +# Start cluster +foc-devnet start + +# Verify all 3 SPs running +docker ps | grep curio +# Should see: +# foc--curio-1 +# foc--curio-2 +# foc--curio-3 + +# Check provider IDs +cat ~/.foc-devnet/run//pdp_sps/1.provider_id.json +cat ~/.foc-devnet/run//pdp_sps/2.provider_id.json +cat ~/.foc-devnet/run//pdp_sps/3.provider_id.json + +# Query registry (from builder container) +docker exec foc--builder cast call \ + \ + "isApproved(uint256)" \ + +# Returns: true + +docker exec foc--builder cast call \ + \ + "isApproved(uint256)" \ + +# Returns: false +``` + +**Testing scenarios:** +```bash +# Send deal to approved SP (should succeed) +# Send deal to unapproved SP (should fail) +# Test failover from SP1 to SP2 +# Test SP3 attempting to accept deal (should reject) +``` + +### Example 3: Maximum SPs (5) + +**Scenario:** Stress testing, load balancing. 
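+
+With five SPs on a single host, resource pressure is usually the practical limit, so it helps to watch container usage while the test runs. A rough sketch using the standard Docker CLI (container names follow the `foc-<run-id>-curio-N` pattern described earlier):
+
+```bash
+# Stream live CPU/memory usage for every Curio SP container in the current run
+docker stats $(docker ps --format '{{.Names}}' | grep curio)
+```
+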
+ +**Configuration:** +```toml +approved_pdp_sp_count = 5 +active_pdp_sp_count = 5 +``` + +**What happens:** +- 5 Curio SPs start (maximum supported) +- All 5 approved +- Distributed across 5 separate networks +- Each SP has own database connection +- Port allocator assigns 5 dynamic ports + +**Steps:** +```bash +# Edit config +vim ~/.foc-devnet/config.toml +# Set: approved_pdp_sp_count = 5, active_pdp_sp_count = 5 + +# Start cluster (may take longer) +foc-devnet start + +# Verify all 5 running +docker ps | grep curio +# Should see: foc--curio-{1,2,3,4,5} + +# Check networks +docker network ls | grep cur-m-net +# Should see: foc--cur-m-net-{1,2,3,4,5} + +# Monitor logs +docker logs -f foc--curio-1 +docker logs -f foc--curio-2 +# ... etc +``` + +### Querying SP Status + +```bash +# List all containers +docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" + +# Check specific SP logs +docker logs foc--curio-2 + +# Query provider IDs +cat ~/.foc-devnet/state/latest/pdp_sps/*.provider_id.json + +# Access Yugabyte (shared by all SPs) +docker exec -it foc--yugabyte ysqlsh -h localhost -p 5433 + +# Query Lotus for miner info +docker exec foc--lotus lotus state miner-info f01000 + +# Check contract via cast +docker exec foc--builder cast call \ + $(cat ~/.foc-devnet/state/latest/contract_addresses.json | jq -r .ServiceProviderRegistry) \ + "getServiceProvider(uint256)" \ + +``` + +--- + +## Troubleshooting + +### Port conflicts +```bash +# Check what's using a port +lsof -i :5700 + +# Change port range in config +vim ~/.foc-devnet/config.toml +# Set: port_range_start = 6000 +``` + +### Container won't start +```bash +# Check logs +docker logs foc--lotus + +# Check if image exists +docker images | grep foc-lotus + +# Rebuild if needed +foc-devnet init --force +``` + +### Build failures +```bash +# Check disk space +df -h + +# Clean Docker +docker system prune -a + +# Rebuild with verbose output +docker build -t foc-lotus docker/lotus/ +``` + +### Network issues +```bash +# List networks +docker network ls | grep foc + +# Inspect network +docker network inspect foc--lot-net + +# Recreate if corrupted +foc-devnet stop +docker network rm foc--lot-net +foc-devnet start +``` + +--- + +## Advanced Topics + +### Custom Genesis Block +Edit genesis templates before `start`: +```bash +# Modify sector size, block time, etc. 
+# (Advanced - requires understanding Filecoin genesis format) +``` + +### Monitoring with Portainer +```bash +# Access Portainer UI (uses first port in range) +http://localhost:5700 # Default +# Or: http://localhost: if you changed the config + +# Default login: admin / (set on first access) +``` + +### Accessing Lotus API +```bash +# Get API token +docker exec foc--lotus cat /root/.lotus/token + +# Query via curl +curl -X POST http://localhost:1234/rpc/v1 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer " \ + -d '{"jsonrpc":"2.0","method":"Filecoin.ChainHead","params":[],"id":1}' +``` + +### Contract Interaction +```bash +# Using cast (from builder container) +docker exec foc--builder cast send \ + --rpc-url http://localhost:1234/rpc/v1 \ + --private-key \ + \ + "transfer(address,uint256)" \ + \ + 1000000000000000000 + +# Using forge script +docker run --rm --network host \ + -v $(pwd)/scripts:/scripts \ + foc-builder forge script /scripts/MyScript.s.sol \ + --rpc-url http://localhost:1234/rpc/v1 \ + --broadcast +``` + +--- + +## Reference Links + +- **Lotus Documentation:** https://lotus.filecoin.io/ +- **Curio Documentation:** https://github.com/filecoin-project/curio +- **FEVM Documentation:** https://docs.filecoin.io/smart-contracts/ +- **Foundry Book:** https://book.getfoundry.sh/ +- **Docker Documentation:** https://docs.docker.com/ + +--- + +**Last Updated:** January 2026 diff --git a/Cargo.lock b/Cargo.lock index 808f56b..7554f8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -838,8 +838,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "foc-localnet" -version = "0.1.0" +name = "foc-devnet" +version = "1.0.0-rc.1" dependencies = [ "base32", "base64 0.21.7", @@ -858,11 +858,13 @@ dependencies = [ "hex", "indicatif", "names", + "num_cpus", "rand 0.8.5", "regex", "reqwest 0.11.27", "serde", "serde_json", + "sha2 0.10.9", "sha3", "tar", "tempfile", @@ -1066,6 +1068,12 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + [[package]] name = "hex" version = "0.4.3" @@ -1647,6 +1655,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + [[package]] name = "num_enum" version = "0.7.5" diff --git a/Cargo.toml b/Cargo.toml index 23213a3..800c1ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "foc-localnet" -version = "0.1.0" +name = "foc-devnet" +version = "1.0.0-rc.1" edition = "2021" [build-dependencies] @@ -34,4 +34,6 @@ flate2 = "1.0" tar = "0.4" reqwest = { version = "0.11", features = ["blocking"] } tracing = "0.1" -tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } \ No newline at end of file +tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } +sha2 = "0.10" +num_cpus = "1.16" \ No newline at end of file diff --git a/README.md b/README.md index ab47a91..84bf7f9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# foc-localnet +# foc-devnet 
 **Run a local Filecoin network with FOC (Filecoin Onchain Contracts) in minutes.**
 
@@ -10,6 +10,14 @@ A developer-friendly tool for spinning up complete Filecoin test networks with s
 
 Get up and running in three simple steps:
 
+### Step 0: Ensure non-root user
+`foc-devnet` must be run as a non-root user. Please ensure that you are running as a non-root user that is a member of the `docker` group.
+
+Run the following to print your user ID and confirm that you are in the `docker` group:
+```
+echo $(id -u); groups | grep 'docker'
+```
+
 ### Step 1: Initialize
 
 ```bash
@@ -48,7 +56,7 @@ This will:
 ### Step 3: Start the Network
 
 ```bash
-cargo run -- start
+cargo run -- start --parallel
 ```
 
 This will:
@@ -56,9 +64,9 @@ This will:
 - Start Lotus daemon with FEVM enabled
 - Deploy FOC smart contracts (including MockUSDFC)
 - Start storage provider(s)
-- Launch Portainer UI for container management
+- Launch [Portainer UI](https://docs.docksal.io/use-cases/portainer/) for container management
 
-**If you are feeling adventurous**: Use `cargo run -- start --parallel`, an experimental feature that attempts to parallelize setup steps as much as possible.
+**If you run into trouble**: Use `cargo run -- start` to disable parallelism during startup; this may take longer but is easier to debug.
 
 **That's it!** Your local Filecoin network is running.
 
@@ -88,7 +96,7 @@ Each can be:
 ### 🔒 Deterministic Setup
 - **Pinned versions**: All components use specific git tags/commits for reproducibility
 - **Deterministic keys**: Uses fixed seeds, generating the same keys on every setup
-- **Consistent addresses**: Find derived accounts in `~/.foc-localnet/state/addresses.json`
+- **Consistent state**: Each run preserves its context in `~/.foc-devnet/run/<run-id>/step_context.json`
 
 ### 🤖 Fully Automated
 From building Docker images to deploying contracts—everything is automated:
@@ -101,12 +109,13 @@ From building Docker images to deploying contracts—everything is automated:
 Built with modular steps for easy extension and customization:
 - Add custom deployment steps
 - Configure multiple PDP service providers
-- Control "allowed" SP nodes via `~/.foc-localnet/config.toml`
+- Control "allowed" SP nodes via `~/.foc-devnet/config.toml`
 
 ### 📜 Programmable
 Built for scripting and automation:
-- **Contract addresses**: `~/.foc-localnet/state/contract_addresses.json`
-- **Account addresses**: `~/.foc-localnet/state/addresses.json`
+- **Contract addresses**: `~/.foc-devnet/run/<run-id>/contract_addresses.json`
+- **Step context**: `~/.foc-devnet/run/<run-id>/step_context.json`
+- **Latest run symlink**: `~/.foc-devnet/state/latest/` → points to most recent run
 - Write scripts for testing, demos, CI/CD pipelines, etc.
 
 ### 🌐 Isolated Networks
@@ -133,34 +142,21 @@ Bundled with Portainer for browser-based Docker management—no terminal wizardr
 
 ---
 
-## 📂 Where's My Data?
-
-Everything lives in `~/.foc-localnet/`:
-
-```
-~/.foc-localnet/
-├── state/
-│ ├── addresses.json # Derived account addresses
-│ └── contract_addresses.json # Deployed smart contracts
-├── artifacts/
-│ └── docker/volumes/ # Persistent container data
-├── logs/ # Container logs
-├── repos/ # Cloned Git repositories
-└── config.toml # Configuration
-```
-
----
-
 ## 🛠️ Need More?
 
-For advanced topics like:
-- Custom repository configurations
-- Multiple storage provider setups
-- Architecture deep-dives
-- Troubleshooting guides
-- API access and scripting
-
-See **[README_ADVANCED.md](README_ADVANCED.md)** for detailed documentation (coming soon).
+See **[ADVANCED_README.md](ADVANCED_README.md)** for comprehensive documentation on: +- **All commands reference** (init, build, start, stop, status, version) +- **Configuration system** (config.toml structure, parameters, editing) +- **Complete directory structure** (what's stored where and why) +- **Resetting and cleanup** (manual cleanup, disk management) +- **Run ID and Step Context** (isolation mechanism, state sharing) +- **Docker and networking** (container architecture, network topology, Portainer debugging) +- **Repository management** (using local repos, sharing configurations) +- **Command flags** (detailed explanations of all flags and when to use them) +- **Lifecycle overview** (full startup sequence, step implementation) +- **Service Provider examples** (1 SP with 0 authorized, 3 SPs with top 2 authorized, etc.) +- **Troubleshooting guides** (port conflicts, build failures, network issues) +- **Advanced topics** (custom genesis, Lotus API access, contract interaction) --- @@ -172,4 +168,4 @@ MIT License - see [LICENSE](LICENSE) file for details. ## 💬 Support -- **Issues**: [GitHub Issues](https://github.com/FilOzone/foc-localnet/issues) +- **Issues**: [GitHub Issues](https://github.com/FilOzone/foc-devnet/issues) diff --git a/build.rs b/build.rs index 6774e9b..11289ff 100644 --- a/build.rs +++ b/build.rs @@ -80,6 +80,15 @@ fn generate_mockusdfc_archive() { return; } + // Ensure artifacts directory exists + let artifacts_dir = Path::new("artifacts"); + if !artifacts_dir.exists() { + if let Err(e) = std::fs::create_dir_all(artifacts_dir) { + eprintln!("Warning: Failed to create artifacts directory: {}", e); + return; + } + } + // Create tar.gz archive in contracts/ directory let output = Command::new("tar") .args(["-czf", "artifacts/MockUSDFC.tar.gz", "contracts/MockUSDFC"]) diff --git a/contracts/README.md b/contracts/README.md index b1ae19d..48c742a 100644 --- a/contracts/README.md +++ b/contracts/README.md @@ -17,7 +17,7 @@ MockUSDFC is a toy implementation of the USDFC (USD Filecoin) token used in prod - Mintable by owner - Simple access control (owner can mint) -## Usage in foc-localnet +## Usage in foc-devnet The MockUSDFC token is automatically deployed during the FOC deployment step when starting the local network: diff --git a/docker/builder/Dockerfile b/docker/builder/Dockerfile index 4d926fe..eccebb0 100644 --- a/docker/builder/Dockerfile +++ b/docker/builder/Dockerfile @@ -25,10 +25,11 @@ ENV PATH=$PATH:/usr/local/go/bin # Create foc-user and foc-group with matching host UID/GID when it is run. The 1002 below is just a placeholder which will be replaced during build process. 
See `docker.rs` ARG USER_ID=1002 ARG GROUP_ID=1002 +RUN userdel ubuntu || true RUN groupadd -g ${GROUP_ID} foc-group && \ useradd -l -u ${USER_ID} -g foc-group -m -s /bin/bash foc-user && \ mkdir -p /home/foc-user/go/pkg && \ - chown -R foc-user:foc-group /home/foc-user/go + chown -R foc-user:foc-group /home/foc-user # Install Rust as foc-user USER foc-user diff --git a/docker/builder/volumes_map.toml b/docker/builder/volumes_map.toml index 6541fca..a43621a 100644 --- a/docker/builder/volumes_map.toml +++ b/docker/builder/volumes_map.toml @@ -1,6 +1,6 @@ # Volume mappings for builder Docker image # Format: host_subdirectory = "container_path" -# Host paths will be relative to ~/.foc-localnet/artifacts/docker/volumes/builder/ +# Host paths will be relative to ~/.foc-devnet/artifacts/docker/volumes/builder/ [volumes] cargo = "/home/foc-user/.cargo" diff --git a/docker/curio/Dockerfile b/docker/curio/Dockerfile index 25ba250..36777b5 100644 --- a/docker/curio/Dockerfile +++ b/docker/curio/Dockerfile @@ -19,6 +19,7 @@ RUN apt-get update && apt-get install -y \ # Create foc-user and foc-group with matching host UID/GID when it is run. The 1002 below is just a placeholder which will be replaced during build process. See `docker.rs` ARG USER_ID=1002 ARG GROUP_ID=1002 +RUN userdel ubuntu || true RUN groupadd -g ${GROUP_ID} foc-group && \ useradd -l -u ${USER_ID} -g foc-group -m -s /bin/bash foc-user @@ -34,7 +35,7 @@ RUN mkdir -p /var/tmp/filecoin-proof-parameters \ USER foc-user # Copy binaries will be mounted from host -# Binary will be mounted from ~/.foc-localnet/bin +# Binary will be mounted from ~/.foc-devnet/bin # Expose curio ports # 12300 - Curio API diff --git a/docker/curio/volumes_map.toml b/docker/curio/volumes_map.toml index 361d97a..dfdbb33 100644 --- a/docker/curio/volumes_map.toml +++ b/docker/curio/volumes_map.toml @@ -1,6 +1,6 @@ # Volume mappings for curio Docker image # Format: host_subdirectory = "container_path" -# Host paths will be relative to ~/.foc-localnet/artifacts/docker/volumes/curio/ +# Host paths will be relative to ~/.foc-devnet/artifacts/docker/volumes/curio/ [volumes] # Curio repository data diff --git a/docker/lotus-miner/Dockerfile b/docker/lotus-miner/Dockerfile index 56f2b35..21a72ab 100644 --- a/docker/lotus-miner/Dockerfile +++ b/docker/lotus-miner/Dockerfile @@ -18,6 +18,7 @@ RUN apt-get update && apt-get install -y \ # Create foc-user and foc-group with matching host UID/GID when it is run. The 1002 below is just a placeholder which will be replaced during build process. 
See `docker.rs` ARG USER_ID=1002 ARG GROUP_ID=1002 +RUN userdel ubuntu || true RUN groupadd -g ${GROUP_ID} foc-group && \ useradd -l -u ${USER_ID} -g foc-group -m -s /bin/bash foc-user @@ -33,7 +34,7 @@ RUN mkdir -p /var/tmp/filecoin-proof-parameters \ USER foc-user # Copy binaries will be mounted from host -# Binary will be mounted from ~/.foc-localnet/bin +# Binary will be mounted from ~/.foc-devnet/bin # Expose lotus-miner ports # 2345 - Lotus Miner API diff --git a/docker/lotus-miner/volumes_map.toml b/docker/lotus-miner/volumes_map.toml index 1e8a058..75641ab 100644 --- a/docker/lotus-miner/volumes_map.toml +++ b/docker/lotus-miner/volumes_map.toml @@ -1,6 +1,6 @@ # Volume mappings for lotus-miner Docker image # Format: host_subdirectory = "container_path" -# Host paths will be relative to ~/.foc-localnet/artifacts/docker/volumes/lotus-miner/ +# Host paths will be relative to ~/.foc-devnet/artifacts/docker/volumes/lotus-miner/ [volumes] # Lotus miner repository data diff --git a/docker/lotus/Dockerfile b/docker/lotus/Dockerfile index 52e9488..3c56c63 100644 --- a/docker/lotus/Dockerfile +++ b/docker/lotus/Dockerfile @@ -20,6 +20,7 @@ RUN apt-get update && apt-get install -y \ # Create foc-user and foc-group with matching host UID/GID when it is run. The 1002 below is just a placeholder which will be replaced during build process. See `docker.rs` ARG USER_ID=1002 ARG GROUP_ID=1002 +RUN userdel ubuntu || true RUN groupadd -g ${GROUP_ID} foc-group && \ useradd -l -u ${USER_ID} -g foc-group -m -s /bin/bash foc-user @@ -34,7 +35,7 @@ RUN mkdir -p /var/tmp/filecoin-proof-parameters \ USER foc-user # Copy the lotus binary from the host (will be mounted) -# Binary will be mounted from ~/.foc-localnet/bin +# Binary will be mounted from ~/.foc-devnet/bin # Expose lotus daemon ports # 1234 - Lotus API diff --git a/docker/lotus/volumes_map.toml b/docker/lotus/volumes_map.toml index eb29a47..a58192b 100644 --- a/docker/lotus/volumes_map.toml +++ b/docker/lotus/volumes_map.toml @@ -1,6 +1,6 @@ # Volume mappings for lotus Docker image # Format: host_subdirectory = "container_path" -# Host paths will be relative to ~/.foc-localnet/artifacts/docker/volumes/lotus/ +# Host paths will be relative to ~/.foc-devnet/artifacts/docker/volumes/lotus/ [volumes] # Lotus repository data diff --git a/docker/yugabyte/Dockerfile b/docker/yugabyte/Dockerfile index e1433da..5061a1f 100644 --- a/docker/yugabyte/Dockerfile +++ b/docker/yugabyte/Dockerfile @@ -17,6 +17,7 @@ ENV LC_ALL=en_US.UTF-8 # Create foc-user and foc-group with matching host UID/GID when it is run. The 1002 below is just a placeholder which will be replaced during build process. 
See `docker.rs` ARG USER_ID=1002 ARG GROUP_ID=1002 +RUN userdel ubuntu || true RUN groupadd -g ${GROUP_ID} foc-group && \ useradd -l -u ${USER_ID} -g foc-group -m -s /bin/bash foc-user diff --git a/docker/yugabyte/volumes_map.toml b/docker/yugabyte/volumes_map.toml index dc2dd57..be4566f 100644 --- a/docker/yugabyte/volumes_map.toml +++ b/docker/yugabyte/volumes_map.toml @@ -1,6 +1,6 @@ # Volume mappings for foc-yugabyte Docker image # Format: host_subdirectory = "container_path" -# Host paths will be relative to ~/.foc-localnet/artifacts/docker/volumes/foc-yugabyte/ +# Host paths will be relative to ~/.foc-devnet/artifacts/docker/volumes/foc-yugabyte/ [volumes] data = "/var/ybdata" diff --git a/scripts/cache-artifacts.sh b/scripts/cache-artifacts.sh index 965f520..46d8d88 100755 --- a/scripts/cache-artifacts.sh +++ b/scripts/cache-artifacts.sh @@ -1,13 +1,13 @@ #!/bin/bash # cache-artifacts.sh - Helper script to cache large artifacts for fast development iteration # -# This script copies pre-downloaded artifacts from ~/stash/ to ~/.foc-localnet/artifacts/ +# This script copies pre-downloaded artifacts from ~/stash/ to ~/.foc-devnet/artifacts/ # to avoid re-downloading them during init. set -e STASH_DIR="${STASH_DIR:-$HOME/stash}" -FOC_ARTIFACTS="$HOME/.foc-localnet/artifacts" +FOC_ARTIFACTS="$HOME/.foc-devnet/artifacts" FOC_VOLUMES="$FOC_ARTIFACTS/docker/volumes" echo "🚀 Caching artifacts for fast init..." diff --git a/scripts/patch-lotus-buildconstants.sh b/scripts/patch-lotus-buildconstants.sh index 10d9e45..dac4a4a 100755 --- a/scripts/patch-lotus-buildconstants.sh +++ b/scripts/patch-lotus-buildconstants.sh @@ -140,7 +140,7 @@ const BootstrapPeerThreshold = 1 // ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint. // As per https://github.com/ethereum-lists/chains -const Eip155ChainId = 1414 +const Eip155ChainId = 31415926 var WhitelistedBlock = cid.Undef diff --git a/src/app.rs b/src/app.rs index 16cf260..bfb6190 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,10 +1,10 @@ /// Initialize the application environment. /// /// This function is now a no-op since comprehensive initialization -/// is handled by the `foc-localnet init` command. Other commands +/// is handled by the `foc-devnet init` command. Other commands /// assume that `init` has been run and the environment is properly set up. 
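All of the Dockerfiles above accept `USER_ID`/`GROUP_ID` build args so that `foc-user` matches the host account; the `1002` defaults are substituted at image-build time by `docker.rs`. A hypothetical sketch of that substitution using `id -u`/`id -g`, as in the README check; the exact flag wiring in `docker.rs` may differ:

```rust
use std::process::Command;

/// Build an image whose foc-user matches the host UID/GID.
fn build_image_with_host_ids(tag: &str, context_dir: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Ask the host for the current user/group IDs (same as `id -u` / `id -g`).
    let uid = String::from_utf8(Command::new("id").arg("-u").output()?.stdout)?;
    let gid = String::from_utf8(Command::new("id").arg("-g").output()?.stdout)?;
    let user_arg = format!("USER_ID={}", uid.trim());
    let group_arg = format!("GROUP_ID={}", gid.trim());
    let status = Command::new("docker")
        .args([
            "build",
            "--build-arg", user_arg.as_str(),
            "--build-arg", group_arg.as_str(),
            "-t", tag,
            context_dir,
        ])
        .status()?;
    if !status.success() {
        return Err(format!("docker build failed for {}", tag).into());
    }
    Ok(())
}
```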
pub fn initialize_app() -> Result<(), Box> { - // Initialization is now handled by the `foc-localnet init` command + // Initialization is now handled by the `foc-devnet init` command // Other commands assume init has been run Ok(()) } diff --git a/src/cli.rs b/src/cli.rs index 32e3204..3941519 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1,9 +1,9 @@ use clap::{Parser, Subcommand}; use std::path::PathBuf; -/// CLI structure for foc-localnet +/// CLI structure for foc-devnet #[derive(Parser)] -#[command(name = "foc-localnet")] +#[command(name = "foc-devnet")] #[command(about = "CLI for managing local filecoin-onchain-cloud cluster")] pub struct Cli { #[command(subcommand)] @@ -30,7 +30,7 @@ pub enum Commands { }, /// Stop the local cluster Stop, - /// Initialize foc-localnet by building and caching Docker images + /// Initialize foc-devnet by building and caching Docker images Init { /// Curio source location (e.g., 'gittag:tag', 'gittag:url:tag', 'gitcommit:commit', 'gitcommit:url:commit', 'gitbranch:branch', 'gitbranch:url:branch', 'local:/path/to/curio') #[arg(long)] @@ -59,13 +59,16 @@ pub enum Commands { /// Use random mnemonic instead of deterministic one #[arg(long)] rand: bool, + /// Skip building Docker images (useful when images are already cached) + #[arg(long)] + no_docker_build: bool, }, /// Build Filecoin projects in a container Build { #[command(subcommand)] build_command: BuildCommands, }, - /// Show status of the foc-localnet system + /// Show status of the foc-devnet system Status, /// Show version information Version, diff --git a/src/commands/build/docker.rs b/src/commands/build/docker.rs index acf3c61..b13e873 100644 --- a/src/commands/build/docker.rs +++ b/src/commands/build/docker.rs @@ -7,7 +7,7 @@ use crate::docker::{ core::{get_current_gid, get_current_uid, image_exists}, }; use crate::embedded_assets; -use crate::paths::foc_localnet_docker_volumes_cache; +use crate::paths::foc_devnet_docker_volumes_cache; use std::collections::HashMap; use std::fs; use tracing::info; @@ -16,7 +16,7 @@ use super::Project; /// Build the builder Docker image. pub fn build_builder_image(dockerfile_dir: &str) -> Result> { - let image_tag = "foc-localnet-builder:latest"; + let image_tag = crate::constants::BUILDER_DOCKER_IMAGE; // Check if image already exists in Docker if image_exists(image_tag)? 
{ @@ -80,15 +80,16 @@ pub fn setup_docker_run_args( let container_output_dir = "/workspace/output"; // Give each project a unique container name so they can build simultaneously - let container_name = format!("foc-builder-{}", project); + let container_name = format!("{}-{}", crate::constants::BUILDER_CONTAINER, project); let mut docker_run_args = vec![ "run".to_string(), - "--rm".to_string(), "-u".to_string(), "foc-user".to_string(), "--name".to_string(), container_name, + "-e".to_string(), + "HOME=/home/foc-user".to_string(), "-v".to_string(), format!("{}:{}", source_dir, container_source_dir), "-v".to_string(), @@ -98,8 +99,8 @@ pub fn setup_docker_run_args( // Load and apply volume mappings for this image let volume_map = load_volume_map("builder")?; if !volume_map.is_empty() { - let cache_dir = foc_localnet_docker_volumes_cache(); - let image_volumes_dir = cache_dir.join("foc-builder"); + let cache_dir = foc_devnet_docker_volumes_cache(); + let image_volumes_dir = cache_dir.join(crate::constants::BUILDER_CONTAINER); for (host_subdir, container_path) in volume_map { let host_path = image_volumes_dir.join(&host_subdir); diff --git a/src/commands/build/execution.rs b/src/commands/build/exec.rs similarity index 84% rename from src/commands/build/execution.rs rename to src/commands/build/exec.rs index ed171ea..11d3699 100644 --- a/src/commands/build/execution.rs +++ b/src/commands/build/exec.rs @@ -10,7 +10,11 @@ use std::fs::OpenOptions; use std::io::{BufRead, BufReader, Write}; use std::path::Path; use std::process::{Command, Stdio}; -use tracing::{error, info}; +use tracing::info; +use tracing::warn; + +/// Time to wait for build container initialization (in seconds) +const BUILD_INIT_WAIT_SECS: u64 = 5; /// Run the build process inside the Docker container. pub fn run_build_in_container( @@ -67,19 +71,23 @@ pub fn execute_build_process( let stdout = child.stdout.take().ok_or("Failed to capture stdout")?; let stderr = child.stderr.take().ok_or("Failed to capture stderr")?; - // Create clones of the log file for writing - let log_file_clone = OpenOptions::new() + // Create log file + let log_file = OpenOptions::new() .create(true) .append(true) .open(log_path)?; + info!("NOTE: StdErr output does not necessarily indicate failure"); + + std::thread::sleep(std::time::Duration::from_secs(BUILD_INIT_WAIT_SECS)); + // Stream stdout to both console and log file let stdout_handle = std::thread::spawn({ - let mut log_file = log_file_clone; + let mut log_file = log_file.try_clone()?; move || { let reader = BufReader::new(stdout); - for line in reader.lines().flatten() { - info!("{}", line); + for line in reader.lines().map_while(Result::ok) { + info!("(stdout): {}", line); writeln!(log_file, "{}", line).ok(); } } @@ -87,14 +95,11 @@ pub fn execute_build_process( // Stream stderr to both console and log file let stderr_handle = std::thread::spawn({ - let mut log_file = OpenOptions::new() - .create(true) - .append(true) - .open(log_path)?; + let mut log_file = log_file.try_clone()?; move || { let reader = BufReader::new(stderr); - for line in reader.lines().flatten() { - error!("{}", line); + for line in reader.lines().map_while(Result::ok) { + warn!("(stderr): {}", line); writeln!(log_file, "{}", line).ok(); } } diff --git a/src/commands/build/logging.rs b/src/commands/build/logging.rs index ac2f700..b98996a 100644 --- a/src/commands/build/logging.rs +++ b/src/commands/build/logging.rs @@ -2,13 +2,13 @@ //! //! This module handles creation and management of build log files. 
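Each image ships a `volumes_map.toml` like the ones shown earlier, and `load_volume_map` turns it into host-subdirectory to container-path pairs that become `-v` mounts. A minimal, self-contained sketch of that parsing (the project's own loader may differ in detail):

```rust
use serde::Deserialize;
use std::collections::HashMap;

#[derive(Deserialize)]
struct VolumesMap {
    volumes: HashMap<String, String>,
}

/// Parse a volumes_map.toml into (host_subdir, container_path) pairs.
fn parse_volumes_map(text: &str) -> Result<HashMap<String, String>, toml::de::Error> {
    let map: VolumesMap = toml::from_str(text)?;
    Ok(map.volumes)
}

// Example input, matching the builder mapping shown earlier:
// [volumes]
// cargo = "/home/foc-user/.cargo"
```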
-use crate::paths::foc_localnet_logs; +use crate::paths::foc_devnet_logs; use std::fs; use std::path::PathBuf; /// Create a timestamped log file path for build logs. pub fn create_build_log_path() -> Result> { - let logs_dir = foc_localnet_logs().join("build"); + let logs_dir = foc_devnet_logs().join("build"); fs::create_dir_all(&logs_dir)?; let timestamp = chrono::Local::now().format("%Y%m%d_%H%M%S"); diff --git a/src/commands/build/mod.rs b/src/commands/build/mod.rs index 4d294e2..1ec04e0 100644 --- a/src/commands/build/mod.rs +++ b/src/commands/build/mod.rs @@ -3,18 +3,18 @@ //! This module handles building Filecoin projects (Lotus and Curio) in a Docker container. pub mod docker; -pub mod execution; +pub mod exec; pub mod logging; pub mod repository; use crate::config::Config; -use crate::paths::foc_localnet_bin; +use crate::paths::foc_devnet_bin; use repository::prepare_repository; use std::fs; use tracing::info; use self::docker::build_builder_image; -use self::execution::run_build_in_container; +use self::exec::run_build_in_container; /// Build a project in a Docker container. /// @@ -34,7 +34,7 @@ pub fn build_project(project: &Project, config: &Config) -> Result<(), Box Result> { let repo_path = match project { - Project::Lotus => foc_localnet_lotus_repo(), - Project::Curio => foc_localnet_curio_repo(), + Project::Lotus => foc_devnet_lotus_repo(), + Project::Curio => foc_devnet_curio_repo(), }; info!( diff --git a/src/commands/config.rs b/src/commands/config.rs index 648c006..4a370d1 100644 --- a/src/commands/config.rs +++ b/src/commands/config.rs @@ -1,10 +1,10 @@ //! Configuration management commands. //! -//! This module provides commands for updating the foc-localnet configuration, +//! This module provides commands for updating the foc-devnet configuration, //! specifically for changing the source locations of Lotus and Curio components. use crate::config::{Config, Location}; -use crate::paths::foc_localnet_config; +use crate::paths::foc_devnet_config; use std::fs; use tracing::info; @@ -52,7 +52,7 @@ fn update_config_location( default_url: &str, ) -> Result<(), Box> { // Load existing config - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); let config_content = fs::read_to_string(&config_path) .map_err(|e| format!("Failed to read config file at {:?}: {}", config_path, e))?; let mut config: Config = toml::from_str(&config_content) diff --git a/src/commands/init/artifacts.rs b/src/commands/init/artifacts.rs index 0e2c899..3a3500d 100644 --- a/src/commands/init/artifacts.rs +++ b/src/commands/init/artifacts.rs @@ -1,4 +1,4 @@ -//! Artifact download utilities for foc-localnet initialization. +//! Artifact download utilities for foc-devnet initialization. //! //! This module handles the downloading and extraction of required artifacts, //! primarily the Yugabyte database. @@ -11,9 +11,9 @@ use std::process::Command; use tracing::info; use crate::config::Config; -use crate::paths::{foc_localnet_artifacts, foc_localnet_config, foc_localnet_proof_parameters}; +use crate::paths::{foc_devnet_artifacts, foc_devnet_config, foc_devnet_proof_parameters}; -/// Download required artifacts for foc-localnet. +/// Download required artifacts for foc-devnet. /// /// This function downloads Yugabyte database and extracts it to the /// artifacts directory, or copies from local paths if provided. 
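The artifact download described above amounts to streaming a `.tar.gz` over HTTP and unpacking it into the artifacts directory. A simplified sketch using the `reqwest`, `flate2`, and `tar` crates already listed in `Cargo.toml`; the real implementation adds config lookup, logging, and the local-copy fallback:

```rust
use flate2::read::GzDecoder;
use std::path::Path;
use tar::Archive;

/// Download a .tar.gz archive and unpack it into `dest`.
fn download_and_unpack(url: &str, dest: &Path) -> Result<(), Box<dyn std::error::Error>> {
    std::fs::create_dir_all(dest)?;
    // blocking::get returns a Response that implements std::io::Read,
    // so it can be streamed straight through the gzip decoder.
    let response = reqwest::blocking::get(url)?;
    if !response.status().is_success() {
        return Err(format!("download failed with status {}", response.status()).into());
    }
    let mut archive = Archive::new(GzDecoder::new(response));
    archive.unpack(dest)?;
    Ok(())
}
```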
@@ -31,7 +31,7 @@ pub fn download_artifacts( info!("Downloading artifacts..."); // Ensure artifacts directory exists - let artifacts_dir = foc_localnet_artifacts(); + let artifacts_dir = foc_devnet_artifacts(); fs::create_dir_all(&artifacts_dir)?; // Handle Yugabyte @@ -39,7 +39,7 @@ pub fn download_artifacts( copy_yugabyte_from_local(&archive_path, &artifacts_dir)?; } else { // Load configuration to get download URL - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); let config_content = fs::read_to_string(&config_path) .map_err(|e| format!("Failed to read config file at {:?}: {}", config_path, e))?; let config: Config = toml::from_str(&config_content) @@ -252,7 +252,7 @@ fn copy_proof_params_from_local(params_path: &str) -> Result<(), Box, force: bool, ) -> Result<(), Box> { - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); if config_path.exists() && !force { info!("Config file already exists: {}", config_path.display()); diff --git a/src/commands/init/directories.rs b/src/commands/init/directories.rs index ca20039..d1af75c 100644 --- a/src/commands/init/directories.rs +++ b/src/commands/init/directories.rs @@ -1,18 +1,18 @@ -//! Directory creation utilities for foc-localnet initialization. +//! Directory creation utilities for foc-devnet initialization. //! //! This module handles the creation of all necessary directories required -//! for foc-localnet to function properly. +//! for foc-devnet to function properly. use std::fs; use tracing::info; use crate::paths::{ - foc_localnet_artifacts, foc_localnet_bin, foc_localnet_code, foc_localnet_docker_volumes, - foc_localnet_docker_volumes_cache, foc_localnet_docker_volumes_run_specific_root, - foc_localnet_home, foc_localnet_keys, foc_localnet_runs, foc_localnet_state, + foc_devnet_artifacts, foc_devnet_bin, foc_devnet_code, foc_devnet_docker_volumes, + foc_devnet_docker_volumes_cache, foc_devnet_docker_volumes_run_specific_root, foc_devnet_home, + foc_devnet_keys, foc_devnet_runs, foc_devnet_state, }; -/// Create all necessary directories for foc-localnet. +/// Create all necessary directories for foc-devnet. /// /// # Returns /// Returns `Ok(())` if all directories are created successfully, or an error if creation fails. @@ -20,16 +20,16 @@ pub fn create_directories() -> Result<(), Box> { info!("Creating necessary directories..."); let directories = vec![ - foc_localnet_home(), - foc_localnet_runs(), - foc_localnet_bin(), - foc_localnet_state(), - foc_localnet_keys(), - foc_localnet_code(), - foc_localnet_artifacts(), - foc_localnet_docker_volumes(), - foc_localnet_docker_volumes_cache(), - foc_localnet_docker_volumes_run_specific_root(), + foc_devnet_home(), + foc_devnet_runs(), + foc_devnet_bin(), + foc_devnet_state(), + foc_devnet_keys(), + foc_devnet_code(), + foc_devnet_artifacts(), + foc_devnet_docker_volumes(), + foc_devnet_docker_volumes_cache(), + foc_devnet_docker_volumes_run_specific_root(), ]; for dir in directories { diff --git a/src/commands/init/keys.rs b/src/commands/init/keys.rs index 548a3c5..db151b7 100644 --- a/src/commands/init/keys.rs +++ b/src/commands/init/keys.rs @@ -1,11 +1,11 @@ -//! Key generation and management for foc-localnet addresses. +//! Key generation and management for foc-devnet addresses. //! //! This module handles generating deterministic addresses and private keys -//! for various components of the foc-localnet system using HD wallet derivation. +//! for various components of the foc-devnet system using HD wallet derivation. 
use crate::{ commands::start::FEVM_ACCOUNTS_PREFUNDED, crypto::mnemonic::store_mnemonic, - paths::foc_localnet_keys, + paths::foc_devnet_keys, }; use bip39::{Language, Mnemonic}; use serde::{Deserialize, Serialize}; @@ -47,7 +47,7 @@ pub const STATIC_MNEMONIC_ENTROPY: [u8; 32] = [ pub const NATIVE_KEYS: [&str; 3] = ["BLS_SIGNER_1", "BLS_SIGNER_2", "GLOBAL_FIL_FAUCET"]; -/// Generate all required keys for foc-localnet. +/// Generate all required keys for foc-devnet. /// /// This function generates keys for: /// - BLS_SIGNER_1 (t3 address) @@ -113,9 +113,9 @@ pub fn generate_keys(use_random: bool) -> Result, Box Result<(), Box> { - let keys_dir = foc_localnet_keys(); + let keys_dir = foc_devnet_keys(); fs::create_dir_all(&keys_dir)?; let keys_file = keys_dir.join("addresses.json"); let json = serde_json::to_string_pretty(keys)?; @@ -124,9 +124,9 @@ fn save_keys(keys: &[KeyInfo]) -> Result<(), Box> { Ok(()) } -/// Load keys from ~/.foc-localnet/keys/addresses.json +/// Load keys from ~/.foc-devnet/keys/addresses.json pub fn load_keys() -> Result, Box> { - let keys_dir = foc_localnet_keys(); + let keys_dir = foc_devnet_keys(); let keys_file = keys_dir.join("addresses.json"); let json = fs::read_to_string(keys_file)?; let keys: Vec = serde_json::from_str(&json)?; diff --git a/src/commands/init/mod.rs b/src/commands/init/mod.rs index cc1f247..0098d7b 100644 --- a/src/commands/init/mod.rs +++ b/src/commands/init/mod.rs @@ -1,6 +1,6 @@ //! Init command implementation. //! -//! This module handles comprehensive initialization of foc-localnet including: +//! This module handles comprehensive initialization of foc-devnet including: //! - Creating all necessary directories //! - Generating default configuration //! - Setting up PATH variables in shell configs @@ -18,66 +18,72 @@ pub mod repositories; use tracing::{info, warn}; -/// Clean up previous foc-localnet installation. +/// Clean up previous foc-devnet installation. /// -/// This function removes the entire ~/.foc-localnet directory and all -/// previously built Docker images to ensure a clean slate for initialization. +/// Removes the entire ~/.foc-devnet directory and optionally all foc-* Docker +/// images to ensure a clean slate for initialization. +/// +/// # Arguments +/// * `remove_images` - When true, also remove cached foc-* Docker images. This +/// should stay false when callers rely on preloaded images (e.g., CI caches). /// /// # Returns /// Returns `Ok(())` if cleanup succeeds, or an error if cleanup fails. 
-fn cleanup_previous_installation() -> Result<(), Box> { - use crate::paths::foc_localnet_home; +fn cleanup_previous_installation(remove_images: bool) -> Result<(), Box> { + use crate::paths::foc_devnet_home; use std::process::Command; info!("Cleaning up previous installation..."); - // Remove the entire foc-localnet home directory - let home_dir = foc_localnet_home(); + // Remove the entire foc-devnet home directory + let home_dir = foc_devnet_home(); if home_dir.exists() { info!("Removing {}", home_dir.display()); std::fs::remove_dir_all(&home_dir)?; - info!("Removed previous foc-localnet installation"); + info!("Removed previous foc-devnet installation"); } else { info!("No previous installation found"); } - // Remove all foc-localnet Docker images - info!("Removing existing foc-localnet Docker images"); - let output = Command::new("docker") - .args(["images", "--format", "{{.Repository}}:{{.Tag}}"]) - .output()?; - - if output.status.success() { - let stdout = String::from_utf8_lossy(&output.stdout); - let mut removed_count = 0; - - for line in stdout.lines() { - if line.starts_with("foc-") { - // Remove the image - let remove_output = Command::new("docker").args(["rmi", line]).output()?; - - if remove_output.status.success() { - removed_count += 1; + // Optionally remove foc-devnet Docker images + if remove_images { + info!("Removing existing foc-devnet Docker images"); + let output = Command::new("docker") + .args(["images", "--format", "{{.Repository}}:{{.Tag}}"]) + .output()?; + + if output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let mut removed_count = 0; + + for line in stdout.lines() { + if line.starts_with("foc-") { + // Remove the image + let remove_output = Command::new("docker").args(["rmi", line]).output()?; + + if remove_output.status.success() { + removed_count += 1; + } } } - } - if removed_count > 0 { - info!("Removed {} Docker image(s)", removed_count); + if removed_count > 0 { + info!("Removed {} Docker image(s)", removed_count); + } else { + info!("No foc-devnet Docker images found"); + } } else { - info!("No foc-localnet Docker images found"); + warn!("Could not list Docker images (Docker may not be running)"); } - } else { - warn!("Could not list Docker images (Docker may not be running)"); } Ok(()) } -/// Initialize foc-localnet comprehensively. +/// Initialize foc-devnet comprehensively. /// /// This command performs complete initialization: -/// 1. Cleans up previous installation (removes ~/.foc-localnet and docker images) +/// 1. Cleans up previous installation (removes ~/.foc-devnet and docker images) /// 2. Creates all necessary directories /// 3. Generates default config.toml /// 4. Sets up PATH variables in shell configs @@ -94,40 +100,46 @@ fn cleanup_previous_installation() -> Result<(), Box> { /// * `proof_params_dir` - Optional path to local filecoin-proof-params directory /// * `force` - Whether to force regeneration of config file /// * `use_random_mnemonic` - Whether to use random mnemonic for key generation +/// * `no_docker_build` - Whether to skip artifact downloads and Docker image builds (use when images are already cached) /// /// # Returns /// Returns `Ok(())` on successful initialization, or an error if any step fails. 
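The refactor below replaces the long positional argument list with an `InitOptions` struct. Called from the CLI layer, the new entry point might be used roughly as follows (a sketch; the field values come from the parsed flags):

```rust
use crate::commands::init::{init_environment, InitOptions};

/// Rough sketch of how the CLI layer could invoke the new entry point.
fn run_init(no_docker_build: bool, rand: bool) -> Result<(), Box<dyn std::error::Error>> {
    init_environment(InitOptions {
        curio_location: None,
        lotus_location: None,
        filecoin_services_location: None,
        synapse_sdk_location: None,
        yugabyte_url: None,
        yugabyte_archive: None,
        proof_params_dir: None,
        force: false,
        use_random_mnemonic: rand,
        no_docker_build,
    })
}
```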
-pub fn init_environment( - curio_location: Option, - lotus_location: Option, - filecoin_services_location: Option, - synapse_sdk_location: Option, - yugabyte_url: Option, - yugabyte_archive: Option, - proof_params_dir: Option, - force: bool, - use_random_mnemonic: bool, -) -> Result<(), Box> { - info!("Initializing foc-localnet environment..."); +/// Options for environment initialization +pub struct InitOptions { + pub curio_location: Option, + pub lotus_location: Option, + pub filecoin_services_location: Option, + pub synapse_sdk_location: Option, + pub yugabyte_url: Option, + pub yugabyte_archive: Option, + pub proof_params_dir: Option, + pub force: bool, + pub use_random_mnemonic: bool, + pub no_docker_build: bool, +} + +pub fn init_environment(options: InitOptions) -> Result<(), Box> { + info!("Initializing foc-devnet environment..."); // Clean up previous installation - cleanup_previous_installation()?; + // Preserve cached Docker images when --no-docker-build is used (CI cache path) + cleanup_previous_installation(!options.no_docker_build)?; // Create all necessary directories directories::create_directories()?; // Generate default configuration config::generate_default_config( - curio_location.clone(), - lotus_location.clone(), - filecoin_services_location.clone(), - synapse_sdk_location.clone(), - yugabyte_url.clone(), - force, + options.curio_location.clone(), + options.lotus_location.clone(), + options.filecoin_services_location.clone(), + options.synapse_sdk_location.clone(), + options.yugabyte_url.clone(), + options.force, )?; // Generate keys - keys::generate_keys(use_random_mnemonic)?; + keys::generate_keys(options.use_random_mnemonic)?; // Set up PATH variables path_setup::setup_path_variables()?; @@ -135,14 +147,19 @@ pub fn init_environment( // Download code repositories repositories::download_code_repositories()?; - // Download required artifacts (or copy from local paths) - artifacts::download_artifacts(yugabyte_archive, proof_params_dir)?; + // Download required artifacts and build Docker images (unless skipped) + if options.no_docker_build { + info!("Skipping artifact downloads and Docker image builds (--no-docker-build flag set)"); + } else { + // Download required artifacts (or copy from local paths) + artifacts::download_artifacts(options.yugabyte_archive, options.proof_params_dir)?; - // Build and cache Docker images - crate::docker::build::build_and_cache_docker_images()?; + // Build and cache Docker images + crate::docker::build::build_and_cache_docker_images()?; + } info!("✓ Initialization completed successfully"); - info!("You can now start the localnet with 'foc-localnet start'"); + info!("You can now start the devnet with 'foc-devnet start'"); Ok(()) } diff --git a/src/commands/init/path_setup.rs b/src/commands/init/path_setup.rs index 9817e4d..a802ec3 100644 --- a/src/commands/init/path_setup.rs +++ b/src/commands/init/path_setup.rs @@ -1,4 +1,4 @@ -//! PATH setup utilities for foc-localnet initialization. +//! PATH setup utilities for foc-devnet initialization. //! //! This module handles the setup of PATH environment variables in shell //! configuration files (.bashrc and .zshrc). @@ -9,18 +9,18 @@ use std::fs; use std::path::Path; use tracing::{info, warn}; -use crate::paths::foc_localnet_bin; +use crate::paths::foc_devnet_bin; /// Set up PATH variables in shell configuration files. 
/// -/// This function adds the foc-localnet bin directory to the PATH in both +/// This function adds the foc-devnet bin directory to the PATH in both /// .bashrc and .zshrc files if it's not already present. It checks if the /// path is already in the current environment before modifying files. /// /// # Returns /// Returns `Ok(())` if PATH setup is successful, or an error if file operations fail. pub fn setup_path_variables() -> Result<(), Box> { - let bin_path = foc_localnet_bin(); + let bin_path = foc_devnet_bin(); let bin_path_str = bin_path.to_string_lossy().to_string(); if is_path_in_env(&bin_path_str) { @@ -64,7 +64,7 @@ fn is_path_in_env(bin_path: &str) -> bool { /// Add the bin path to a shell configuration file if not already present. /// /// This function appends an export statement to the shell config file -/// to add the foc-localnet bin directory to PATH. It includes a marker +/// to add the foc-devnet bin directory to PATH. It includes a marker /// comment to prevent duplicate additions. /// /// # Arguments @@ -82,7 +82,7 @@ fn add_path_to_shell_config( } let mut content = fs::read_to_string(config_path)?; - let marker = "# foc-localnet PATH addition"; + let marker = "# foc-devnet PATH addition"; if content.contains(marker) { return Ok(()); diff --git a/src/commands/init/repositories.rs b/src/commands/init/repositories.rs index 999803a..be60d6e 100644 --- a/src/commands/init/repositories.rs +++ b/src/commands/init/repositories.rs @@ -1,4 +1,4 @@ -//! Code repository download utilities for foc-localnet initialization. +//! Code repository download utilities for foc-devnet initialization. //! //! This module handles the downloading and setup of Git repositories //! for Lotus and Curio components. @@ -9,9 +9,9 @@ use std::process::Command; use tracing::info; use crate::config::{Config, Location}; -use crate::paths::{foc_localnet_code, foc_localnet_config}; +use crate::paths::{foc_devnet_code, foc_devnet_config}; -/// Download code repositories for foc-localnet. +/// Download code repositories for foc-devnet. /// /// This function clones Git repositories for lotus and curio if their /// locations are Git-based. It reads the repository locations from the @@ -23,7 +23,7 @@ pub fn download_code_repositories() -> Result<(), Box> { info!("Downloading code repositories..."); // Load configuration - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); let config_content = fs::read_to_string(&config_path) .map_err(|e| format!("Failed to read config file at {:?}: {}", config_path, e))?; let config: Config = toml::from_str(&config_content) @@ -69,7 +69,7 @@ fn download_repository(name: &str, location: &Location) -> Result<(), Box, branch: Option<&str>, ) -> Result<(), Box> { - let repo_dir = foc_localnet_code().join(name); + let repo_dir = foc_devnet_code().join(name); if repo_dir.exists() { info!( diff --git a/src/commands/requirements/mod.rs b/src/commands/requirements/mod.rs index a7954eb..323928b 100644 --- a/src/commands/requirements/mod.rs +++ b/src/commands/requirements/mod.rs @@ -1,6 +1,6 @@ //! Requirements checker command. //! -//! This module checks if all system requirements are met to run the foc-localnet system. +//! This module checks if all system requirements are met to run the foc-devnet system. use std::process::{Command, Stdio}; use tracing::{error, info, warn}; @@ -80,7 +80,7 @@ fn check_docker_requirement(setup: bool) -> Result<(), Box Result<(), Box> { if setup_docker::linux::is_ubuntu_or_debian()? 
{ setup_docker::linux::install_docker_ubuntu()?; } else { - eprintln!( - "{}", - "❌ Automatic Docker installation is only supported on Ubuntu/Debian Linux." - ); + eprintln!("❌ Automatic Docker installation is only supported on Ubuntu/Debian Linux."); return Err("Unsupported Linux distribution".into()); } } else if cfg!(target_os = "macos") { // On macOS, Docker installation is handled by Homebrew - eprintln!("{}", "❌ Please install Docker Desktop manually on macOS."); - eprintln!( - "{}", - "Download from: https://www.docker.com/products/docker-desktop" - ); + eprintln!("❌ Please install Docker Desktop manually on macOS."); + eprintln!("Download from: https://www.docker.com/products/docker-desktop"); return Err("Manual Docker installation required on macOS".into()); } else { - eprintln!( - "{}", - "❌ Automatic Docker installation is not supported on this platform." - ); + eprintln!("❌ Automatic Docker installation is not supported on this platform."); return Err("Unsupported platform".into()); } diff --git a/src/commands/start/curio/constants.rs b/src/commands/start/curio/constants.rs index daffffa..e270b6f 100644 --- a/src/commands/start/curio/constants.rs +++ b/src/commands/start/curio/constants.rs @@ -13,7 +13,7 @@ pub const CURIO_LAYERS: &str = "seal,post,pdp-only,gui"; /// PDP layer configuration template pub const PDP_LAYER_CONFIG_TEMPLATE: &str = r#"[HTTP] DelegateTLS = true -DomainName = "pdp-sp-{sp_index}.foc-localnet.internal" +DomainName = "pdp-sp-{sp_index}.foc-devnet.internal" Enable = true ListenAddress = "0.0.0.0:4702" diff --git a/src/commands/start/curio/daemon.rs b/src/commands/start/curio/daemon.rs index 1ed99f8..18ef72a 100644 --- a/src/commands/start/curio/daemon.rs +++ b/src/commands/start/curio/daemon.rs @@ -7,11 +7,12 @@ use super::db_setup::{build_db_env_vars, build_foc_contract_env_vars, build_lotu use super::CurioStep; use crate::commands::start::curio::constants::CURIO_LAYERS; use crate::docker::command_logger::run_and_log_command_strings; +use crate::docker::init::set_volume_ownership; use crate::docker::network::{lotus_network_name, pdp_miner_network_name}; use crate::docker::{container_exists, stop_and_remove_container}; use crate::paths::{ - foc_localnet_bin, foc_localnet_curio_sp_volume, foc_localnet_genesis_sectors_pdp_sp, - foc_localnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, + foc_devnet_bin, foc_devnet_curio_sp_volume, foc_devnet_genesis_sectors_pdp_sp, + foc_devnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, }; use std::error::Error; use std::fs; @@ -46,7 +47,7 @@ pub fn start_curio_daemon( create_curio_directories(context, sp_index)?; // Step 2: Create and start container - let docker_args = build_docker_run_args(context, sp_index, &container_name)?; + let docker_args = build_docker_create_args(context, sp_index, &container_name)?; start_curio_container(context, &container_name, docker_args)?; Ok(()) @@ -55,7 +56,7 @@ pub fn start_curio_daemon( /// Create necessary directories for Curio fn create_curio_directories(context: &SetupContext, sp_index: usize) -> Result<(), Box> { let run_id = context.run_id(); - let curio_sp_dir = foc_localnet_curio_sp_volume(run_id, sp_index); + let curio_sp_dir = foc_devnet_curio_sp_volume(run_id, sp_index); let dirs = vec![ curio_sp_dir.join(".curio"), @@ -63,14 +64,21 @@ fn create_curio_directories(context: &SetupContext, sp_index: usize) -> Result<( curio_sp_dir.join("long-term-storage"), ]; - for dir in dirs { - fs::create_dir_all(&dir)?; + for dir in &dirs { + fs::create_dir_all(dir)?; } 
+ set_volume_ownership(&curio_sp_dir)?; + Ok(()) } /// Create and start Curio container +/// +/// Uses docker create + network connect + start pattern so that: +/// 1. Container is created but not started +/// 2. Networks are connected while container is stopped +/// 3. Container is started with Curio as PID 1 (logs work properly) fn start_curio_container( context: &SetupContext, container_name: &str, @@ -78,16 +86,16 @@ fn start_curio_container( ) -> Result<(), Box> { info!("Creating container {}...", container_name); - // Add image and command - run curio directly as the main process - docker_args.push("foc-curio".to_string()); + // Add image and command - Curio as main process + docker_args.push(crate::constants::CURIO_DOCKER_IMAGE.to_string()); docker_args.push("/usr/local/bin/lotus-bins/curio".to_string()); docker_args.push("run".to_string()); docker_args.push("--nosync".to_string()); docker_args.push("--layers".to_string()); docker_args.push(CURIO_LAYERS.to_string()); - // Execute docker run - let key = format!("curio_daemon_start_{}", container_name); + // Execute docker create (not run) + let key = format!("curio_daemon_create_{}", container_name); let output = run_and_log_command_strings("docker", &docker_args, context, &key)?; if !output.status.success() { @@ -98,7 +106,7 @@ fn start_curio_container( .into()); } - // Connect to filecoin network + // Connect to filecoin network before starting let lotus_network = lotus_network_name(context.run_id()); let network_args = vec![ "network".to_string(), @@ -109,26 +117,38 @@ fn start_curio_container( let key = format!("curio_network_connect_{}", container_name); let _ = run_and_log_command_strings("docker", &network_args, context, &key); // Ignore errors if already connected - info!("Container created"); + // Start the container + let start_args = vec!["start".to_string(), container_name.to_string()]; + let key = format!("curio_daemon_start_{}", container_name); + let output = run_and_log_command_strings("docker", &start_args, context, &key)?; + + if !output.status.success() { + return Err(format!( + "Failed to start curio container: {}", + String::from_utf8_lossy(&output.stderr) + ) + .into()); + } + + info!("Container created and started"); Ok(()) } -/// Build docker run arguments for Curio -fn build_docker_run_args( +/// Build docker create arguments for Curio +fn build_docker_create_args( context: &SetupContext, sp_index: usize, container_name: &str, ) -> Result, Box> { let run_id = context.run_id(); - let curio_sp_dir = foc_localnet_curio_sp_volume(run_id, sp_index); - let bin_dir = foc_localnet_bin(); - let proof_params_dir = foc_localnet_proof_parameters(); - let genesis_sectors_dir = foc_localnet_genesis_sectors_pdp_sp(run_id, sp_index); + let curio_sp_dir = foc_devnet_curio_sp_volume(run_id, sp_index); + let bin_dir = foc_devnet_bin(); + let proof_params_dir = foc_devnet_proof_parameters(); + let genesis_sectors_dir = foc_devnet_genesis_sectors_pdp_sp(run_id, sp_index); let mut docker_args = vec![ - "run".to_string(), - "-d".to_string(), + "create".to_string(), "--name".to_string(), container_name.to_string(), "--network".to_string(), diff --git a/src/commands/start/curio/db_setup.rs b/src/commands/start/curio/db_setup.rs index bec588c..ea6014b 100644 --- a/src/commands/start/curio/db_setup.rs +++ b/src/commands/start/curio/db_setup.rs @@ -13,8 +13,8 @@ use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::lotus_container_name; use crate::docker::core::docker_command; use 
crate::docker::network::{lotus_network_name, pdp_miner_network_name}; -use crate::paths::foc_localnet_bin; -use crate::paths::foc_localnet_docker_volumes; +use crate::paths::foc_devnet_bin; +use crate::paths::foc_devnet_docker_volumes; use std::error::Error; use std::thread; use std::time::Duration; @@ -33,28 +33,40 @@ pub fn build_foc_contract_env_vars(context: &SetupContext) -> Result // Get standard contracts if let Some(usdfc) = addresses.contracts.get("usdfc") { - env_vars.push(format!("FOC_CONTRACT_USDFC={}", usdfc)); + env_vars.push(format!("CURIO_DEVNET_USDFC_ADDRESS={}", usdfc)); } // Get FOC service contracts + if let Some(payment) = addresses.foc_contracts.get("payment_contract") { + env_vars.push(format!("CURIO_DEVNET_PAYMENTS_ADDRESS={}", payment)); + } + if let Some(multicall) = addresses.foc_contracts.get("multicall_address") { + env_vars.push(format!("CURIO_DEVNET_MULTICALL_ADDRESS={}", multicall)); + } + if let Some(pdp) = addresses.foc_contracts.get("p_d_p_verifier_proxy") { + env_vars.push(format!("CURIO_DEVNET_PDP_VERIFIER_ADDRESS={}", pdp)); + } if let Some(fwss) = addresses .foc_contracts .get("filecoin_warm_storage_service_proxy") { - env_vars.push(format!("FOC_CONTRACT_FWSS={}", fwss)); - } - if let Some(multicall) = addresses.contracts.get("multicall") { - env_vars.push(format!("FOC_CONTRACT_MULTICALL={}", multicall)); + env_vars.push(format!("CURIO_DEVNET_FWSS_ADDRESS={}", fwss)); } - if let Some(pay) = addresses.foc_contracts.get("filecoin_pay_v1_contract") { - env_vars.push(format!("FOC_CONTRACT_PAY={}", pay)); - } - if let Some(pdp) = addresses.foc_contracts.get("p_d_p_verifier_proxy") { - env_vars.push(format!("FOC_PDP_VERIFIER_PROXY={}", pdp)); + if let Some(sp_registry) = addresses + .foc_contracts + .get("service_provider_registry_proxy") + { + env_vars.push(format!( + "CURIO_DEVNET_SERVICE_REGISTRY_ADDRESS={}", + sp_registry + )); } - // SIMPLE contract is always zero address - env_vars.push("FOC_CONTRACT_SIMPLE=0x0000000000000000000000000000000000000000".to_string()); + // Simple record keeper is always zero address + env_vars.push( + "CURIO_DEVNET_RECORD_KEEPER_SIMPLE_ADDRESS=0x0000000000000000000000000000000000000000" + .to_string(), + ); Ok(env_vars) } @@ -142,11 +154,11 @@ fn create_base_cluster( let lotus_network = lotus_network_name(run_id); // Get binary directory for volume mount - let bin_dir = foc_localnet_bin(); + let bin_dir = foc_devnet_bin(); let bin_mount = format!("{}:/usr/local/bin/lotus-bins", bin_dir.display()); // Get lotus-data directory for volume mount (needed for token and LOTUS_PATH) - let lotus_data_dir = foc_localnet_docker_volumes().join("lotus-data"); + let lotus_data_dir = foc_devnet_docker_volumes().join("lotus-data"); let lotus_data_mount = format!("{}:/lotus-data", lotus_data_dir.display()); // Create a unique container name for this operation @@ -191,7 +203,12 @@ fn create_base_cluster( "sleep 3 && /usr/local/bin/lotus-bins/curio config new-cluster {}", miner_id ); - docker_args.extend_from_slice(&["foc-curio", "/bin/bash", "-c", &bash_cmd]); + docker_args.extend_from_slice(&[ + crate::constants::CURIO_DOCKER_IMAGE, + "/bin/bash", + "-c", + &bash_cmd, + ]); let docker_args_str: Vec<&str> = docker_args.iter().map(|s| s.as_ref()).collect(); let key = format!("curio_new_cluster_sp_{}", sp_index); @@ -244,11 +261,11 @@ fn create_pdp_layer(context: &SetupContext, sp_index: usize) -> Result<(), Box Result<(), Box = docker_args.iter().map(|s| s.as_ref()).collect(); let key = format!("curio_pdp_layer_config_sp_{}", sp_index); 
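The create/connect/start sequence documented in `daemon.rs` above keeps Curio as PID 1 while still letting the container join a second network before it boots. A bare-bones sketch of that shell-out pattern, without the project's logging and context plumbing (the real argument lists are much longer):

```rust
use std::process::Command;

/// Create a container, attach an extra network, then start it.
fn create_connect_start(
    name: &str,
    image: &str,
    primary_network: &str,
    extra_network: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    // 1. Create the container (not started yet) on its primary network.
    run_docker(&["create", "--name", name, "--network", primary_network, image])?;
    // 2. Connect additional networks while the container is stopped.
    run_docker(&["network", "connect", extra_network, name])?;
    // 3. Start it; the image's command becomes PID 1, so `docker logs` behaves as expected.
    run_docker(&["start", name])?;
    Ok(())
}

fn run_docker(args: &[&str]) -> Result<(), Box<dyn std::error::Error>> {
    let output = Command::new("docker").args(args).output()?;
    if !output.status.success() {
        return Err(format!(
            "docker {:?} failed: {}",
            args,
            String::from_utf8_lossy(&output.stderr)
        )
        .into());
    }
    Ok(())
}
```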
diff --git a/src/commands/start/curio/pdp.rs b/src/commands/start/curio/pdp.rs index 4bdfb8b..a39f08c 100644 --- a/src/commands/start/curio/pdp.rs +++ b/src/commands/start/curio/pdp.rs @@ -48,7 +48,7 @@ pub fn import_pdp_key(context: &SetupContext, sp_index: usize) -> Result<(), Box /// Get PDP SP credentials from addresses.json fn get_pdp_sp_credentials(pdp_sp_name: &str) -> Result<(String, String), Box> { - let state_file = crate::paths::foc_localnet_keys().join("addresses.json"); + let state_file = crate::paths::foc_devnet_keys().join("addresses.json"); if !state_file.exists() { return Err(format!("State addresses file not found: {}", state_file.display()).into()); } diff --git a/src/commands/start/curio/pre_execute.rs b/src/commands/start/curio/pre_execute.rs index 15fba21..65d4bdc 100644 --- a/src/commands/start/curio/pre_execute.rs +++ b/src/commands/start/curio/pre_execute.rs @@ -4,12 +4,9 @@ //! attempting to start Curio. use super::super::step::SetupContext; -use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::lotus_container_name; use crate::docker::{container_exists, container_is_running}; use std::error::Error; -use std::thread; -use std::time::Duration; use tracing::info; /// Verify prerequisites for Curio setup. @@ -17,7 +14,6 @@ use tracing::info; /// Checks: /// 1. Lotus container is running /// 2. Lotus-Miner container is running -/// 3. Chain is progressing (blocks are being generated) pub fn verify_prerequisites(context: &SetupContext, sp_count: usize) -> Result<(), Box> { info!("Verifying Lotus is running and producing blocks..."); @@ -41,9 +37,6 @@ pub fn verify_prerequisites(context: &SetupContext, sp_count: usize) -> Result<( .into()); } - // Verify chain is progressing - verify_chain_progressing(context, &lotus_container)?; - info!( "Allocating and verifying ports for {} Curio instance(s)...", sp_count @@ -92,68 +85,3 @@ pub fn verify_prerequisites(context: &SetupContext, sp_count: usize) -> Result<( Ok(()) } - -/// Verify that the Filecoin chain is progressing (blocks are being generated). -fn verify_chain_progressing( - context: &SetupContext, - lotus_container: &str, -) -> Result<(), Box> { - info!("Checking chain is progressing..."); - - // Get initial block height - let height1 = get_chain_head_height(context, lotus_container)?; - - // Wait 6 seconds (should be enough for at least 1 block with 4s block time) - info!("Waiting 6 seconds to verify block production..."); - thread::sleep(Duration::from_secs(6)); - - // Get new block height - let height2 = get_chain_head_height(context, lotus_container)?; - - if height2 <= height1 { - return Err(format!( - "Chain is not progressing. Initial height: {}, Current height: {}. \ - Ensure Lotus-Miner is running and producing blocks.", - height1, height2 - ) - .into()); - } - - info!("Chain is progressing (height {} → {})", height1, height2); - - Ok(()) -} - -/// Get the current chain head height from Lotus. 
-fn get_chain_head_height( - context: &SetupContext, - lotus_container: &str, -) -> Result> { - let key = format!("curio_pre_check_chain_height_{}", lotus_container); - let output = run_and_log_command( - "docker", - &[ - "exec", - lotus_container, - "/usr/local/bin/lotus-bins/lotus", - "chain", - "head", - "--height", - ], - context, - &key, - )?; - - if !output.status.success() { - return Err(format!( - "Failed to get chain head height: {}", - String::from_utf8_lossy(&output.stderr) - ) - .into()); - } - - let height_str = String::from_utf8_lossy(&output.stdout); - let height = height_str.trim().parse::()?; - - Ok(height) -} diff --git a/src/commands/start/curio/storage.rs b/src/commands/start/curio/storage.rs index 0560e26..c00445c 100644 --- a/src/commands/start/curio/storage.rs +++ b/src/commands/start/curio/storage.rs @@ -12,6 +12,39 @@ use std::thread; use std::time::Duration; use tracing::info; +/// Wait for Curio RPC to be ready using the built-in wait-api command. +fn wait_for_curio_rpc(context: &SetupContext, container_name: &str) -> Result<(), Box> { + info!("Waiting for Curio RPC to be ready..."); + + let machine_addr = format!("{}:12300", container_name); + let key = format!("curio_wait_api_{}", container_name); + let output = run_and_log_command( + "docker", + &[ + "exec", + container_name, + "/usr/local/bin/lotus-bins/curio", + "cli", + "--machine", + &machine_addr, + "wait-api", + ], + context, + &key, + )?; + + if !output.status.success() { + return Err(format!( + "Curio RPC failed to become ready: {}", + String::from_utf8_lossy(&output.stderr) + ) + .into()); + } + + info!("Curio RPC is ready"); + Ok(()) +} + /// Attach storage locations for a specific PDP SP. /// /// Attaches: @@ -26,6 +59,9 @@ pub fn attach_storage_locations( let run_id = context.run_id(); let container_name = format!("foc-{}-curio-{}", run_id, sp_index); + // Wait for RPC to be ready before attaching storage + wait_for_curio_rpc(context, &container_name)?; + // Attach fast storage attach_fast_storage(context, &container_name)?; diff --git a/src/commands/start/curio/verification.rs b/src/commands/start/curio/verification.rs index fad02f8..9bce631 100644 --- a/src/commands/start/curio/verification.rs +++ b/src/commands/start/curio/verification.rs @@ -7,10 +7,11 @@ use super::super::step::SetupContext; use super::constants::TEST_FILE_SIZE_BYTES; +use crate::paths::foc_devnet_bin; use rand::Rng; use std::error::Error; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process::Command; use std::thread::sleep; use std::time::Duration; @@ -62,7 +63,7 @@ fn verify_pdp_ping(context: &SetupContext, sp_index: usize) -> Result<(), Box Result<(), Box> { - info!("Testing upload/download functionality..."); + info!("Testing upload/download functionality via pdptool..."); // Create temporary directory for test files let temp_dir = TempDir::new()?; @@ -102,7 +103,7 @@ fn create_random_test_file(temp_dir: &TempDir) -> Result /// Upload test file using pdptool. 
fn upload_test_file( context: &SetupContext, - file_path: &PathBuf, + file_path: &Path, sp_index: usize, ) -> Result> { // Get dynamically allocated PDP port from context @@ -113,7 +114,10 @@ fn upload_test_file( let service_url = format!("http://localhost:{}", port); - // Run pdptool upload twice due to tool quirk + let file_path_str = file_path + .to_str() + .ok_or("Invalid file path: contains non-UTF8 characters")?; + let args = [ "upload-piece", "--service-url", @@ -122,11 +126,13 @@ fn upload_test_file( "public", "--hash-type", "commp", - file_path.to_str().unwrap(), + file_path_str, "--verbose", ]; - let output = Command::new("pdptool").args(args).output()?; + let output = Command::new(foc_devnet_bin().join("pdptool")) + .args(args) + .output()?; if !output.status.success() { return Err(format!( @@ -182,7 +188,7 @@ fn download_piece( let download_url = format!("http://localhost:{}/piece/{}", port, piece_cid); // Retry download a few times in case piece isn't immediately available - for attempt in 1..=5 { + for attempt in 1..=15 { let response = reqwest::blocking::get(&download_url)?; if response.status().is_success() { @@ -190,13 +196,13 @@ fn download_piece( return Ok(data); } - if attempt < 5 { + if attempt < 15 { info!( "Download attempt {} failed with status: {}, retrying...", attempt, response.status() ); - sleep(Duration::from_secs(2)); + sleep(Duration::from_secs(4)); } } diff --git a/src/commands/start/eth_acc_funding/eth_acc_funding_step.rs b/src/commands/start/eth_acc_funding/eth_acc_funding_step.rs index 58095e5..844fe27 100644 --- a/src/commands/start/eth_acc_funding/eth_acc_funding_step.rs +++ b/src/commands/start/eth_acc_funding/eth_acc_funding_step.rs @@ -10,6 +10,7 @@ use crate::commands::start::eth_acc_funding::constants::FEVM_ACCOUNTS_PREFUNDED; use crate::commands::start::step::{SetupContext, Step}; use crate::docker::command_logger::log_command; use crate::docker::containers::lotus_container_name; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; use std::error::Error; use std::fs; use std::path::PathBuf; @@ -68,7 +69,7 @@ impl ETHAccFundingStep { context: &SetupContext, ) -> Result> { let run_id = context.run_id(); - let keys_dir = crate::paths::foc_localnet_lotus_keys(run_id); + let keys_dir = crate::paths::foc_devnet_lotus_keys(run_id); let faucet_key_dir = keys_dir.join(GLOBAL_FIL_FAUCET_KEY); let keyinfo_files: Vec<_> = fs::read_dir(&faucet_key_dir)? 
.filter_map(|e| e.ok()) @@ -285,93 +286,93 @@ impl ETHAccFundingStep { let account_name_clone = account_name.clone(); let handle = thread::spawn(move || { - // Log the command - let key = format!( - "eth_acc_verify_balance_{}_{}", - account_name_clone, container - ); - log_command( - "docker", - &[ - "exec", - &container, - "/usr/local/bin/lotus-bins/lotus", - "wallet", - "balance", - &address, - ], - &context_clone, - &key, - ); + // Retry balance verification with fixed delay + let verify_result = + retry_with_fixed_delay( + || { + // Log the command + let key = format!( + "eth_acc_verify_balance_{}_{}", + account_name_clone, container + ); + log_command( + "docker", + &[ + "exec", + &container, + "/usr/local/bin/lotus-bins/lotus", + "wallet", + "balance", + &address, + ], + &context_clone, + &key, + ); - let output = Command::new("docker") - .args([ - "exec", - &container, - "/usr/local/bin/lotus-bins/lotus", - "wallet", - "balance", - &address, - ]) - .output(); + let output = Command::new("docker") + .args([ + "exec", + &container, + "/usr/local/bin/lotus-bins/lotus", + "wallet", + "balance", + &address, + ]) + .output() + .map_err(|e| -> Box { + format!("Failed to execute balance check: {}", e).into() + })?; + + if !output.status.success() { + return Err(format!( + "Failed to check balance: {}", + String::from_utf8_lossy(&output.stderr) + ) + .into()); + } - match output { - Ok(out) if out.status.success() => { - let balance_str = String::from_utf8_lossy(&out.stdout); - let balance_str = balance_str.trim(); - - // Parse balance (format: "XXX FIL") - if let Some(balance_fil) = balance_str.strip_suffix(" FIL") { - match balance_fil.trim().parse::() { - Ok(balance) => { - let expected = expected_amount as f64; - if balance >= expected { - info!( - "{}: {} FIL (expected: {} FIL)", - account_name, balance, expected - ); - } else { - let error_msg = format!( - "{}: Insufficient balance. Expected at least {} FIL, got {} FIL", - account_name, expected, balance - ); - tracing::error!(" {}", error_msg); - errors_clone.lock().unwrap().push(error_msg); - } - } - Err(e) => { - let error_msg = format!( - "{}: Failed to parse balance '{}': {}", - account_name, balance_fil, e - ); - tracing::error!(" {}", error_msg); - errors_clone.lock().unwrap().push(error_msg); - } + let balance_str = String::from_utf8_lossy(&output.stdout); + let balance_str = balance_str.trim(); + + // Parse balance (format: "XXX FIL") + let balance_fil = balance_str.strip_suffix(" FIL").ok_or_else( + || -> Box { + format!("Unexpected balance format: {}", balance_str).into() + }, + )?; + + let balance = balance_fil.trim().parse::().map_err( + |e| -> Box { + format!("Failed to parse balance '{}': {}", balance_fil, e) + .into() + }, + )?; + + let expected = expected_amount as f64; + if balance >= expected { + info!( + "{}: {} FIL (expected: {} FIL)", + account_name, balance, expected + ); + Ok(()) + } else { + Err(format!( + "Insufficient balance. 
Expected at least {} FIL, got {} FIL", + expected, balance + ) + .into()) } - } else { - let error_msg = format!( - "{}: Unexpected balance format: {}", - account_name, balance_str - ); - tracing::error!(" {}", error_msg); - errors_clone.lock().unwrap().push(error_msg); - } - } - Ok(out) => { - let error_msg = format!( - "{}: Failed to check balance: {}", - account_name, - String::from_utf8_lossy(&out.stderr) - ); - tracing::error!(" {}", error_msg); - errors_clone.lock().unwrap().push(error_msg); - } - Err(e) => { - let error_msg = - format!("{}: Failed to execute balance check: {}", account_name, e); - tracing::error!(" {}", error_msg); - errors_clone.lock().unwrap().push(error_msg); - } + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + &format!("Balance verification for {}", account_name), + ); + + // Handle retry result + if let Err(e) = verify_result { + let error_msg = format!("{}: {}", account_name, e); + tracing::error!(" {}", error_msg); + errors_clone.lock().unwrap().push(error_msg); } }); diff --git a/src/commands/start/eth_acc_funding/key_operations.rs b/src/commands/start/eth_acc_funding/key_operations.rs index 4977e7f..65f607a 100644 --- a/src/commands/start/eth_acc_funding/key_operations.rs +++ b/src/commands/start/eth_acc_funding/key_operations.rs @@ -11,7 +11,7 @@ use tracing::info; use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::lotus_container_name; -use crate::paths::foc_localnet_lotus_keys; +use crate::paths::foc_devnet_lotus_keys; /// Import the GLOBAL_FIL_FAUCET key into Lotus wallet pub fn import_faucet_key( @@ -37,7 +37,7 @@ pub fn import_faucet_key( .map_err(|e| format!("Failed to write hex key file: {}", e))?; // Get the container path for the temp file - let keys_dir = foc_localnet_lotus_keys(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); let relative_path = temp_key_file .strip_prefix(&keys_dir) .map_err(|_| "Failed to get relative path for hex key file")?; diff --git a/src/commands/start/eth_acc_funding/lotus_checks.rs b/src/commands/start/eth_acc_funding/lotus_checks.rs index d41fb29..e44027b 100644 --- a/src/commands/start/eth_acc_funding/lotus_checks.rs +++ b/src/commands/start/eth_acc_funding/lotus_checks.rs @@ -9,7 +9,7 @@ use crate::commands::start::eth_acc_funding::constants::GLOBAL_FIL_FAUCET_KEY; use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::lotus_container_name; -use crate::paths::foc_localnet_lotus_keys; +use crate::paths::foc_devnet_lotus_keys; /// Check if Lotus is running and accessible pub fn check_lotus_running(context: &SetupContext) -> Result<(), Box> { @@ -45,7 +45,7 @@ pub fn check_lotus_running(context: &SetupContext) -> Result<(), Box> /// Get the global faucet address from the prefunded key pub fn get_global_faucet_address(run_id: &str) -> Result> { - let keys_dir = foc_localnet_lotus_keys(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); let faucet_key_dir = keys_dir.join(GLOBAL_FIL_FAUCET_KEY); if !faucet_key_dir.exists() { diff --git a/src/commands/start/foc_deploy/deployment.rs b/src/commands/start/foc_deploy/deployment.rs index f2af7ed..edd3f85 100644 --- a/src/commands/start/foc_deploy/deployment.rs +++ b/src/commands/start/foc_deploy/deployment.rs @@ -43,6 +43,9 @@ pub fn perform_deployment( ) -> Result<(), Box> { info!("Deploying FOC service contracts..."); + // Clear cached devnet addresses so contracts are actually deployed + 
super::helpers::clear_cached_devnet_deployments()?; + // Get required addresses from context let (foc_deployer, foc_deployer_eth, mock_usdfc_address, _global_faucet) = super::helpers::check_required_addresses(context)?; diff --git a/src/commands/start/foc_deploy/foc_deploy_step.rs b/src/commands/start/foc_deploy/foc_deploy_step.rs index 1fc7050..4e01f61 100644 --- a/src/commands/start/foc_deploy/foc_deploy_step.rs +++ b/src/commands/start/foc_deploy/foc_deploy_step.rs @@ -49,7 +49,7 @@ impl Step for FOCDeployStep { if !services_repo.exists() { return Err(format!( "filecoin-services repository not found at {}. \ - Please run 'foc-localnet init' to clone the repository.", + Please run 'foc-devnet init' to clone the repository.", services_repo.display() ) .into()); diff --git a/src/commands/start/foc_deploy/helpers.rs b/src/commands/start/foc_deploy/helpers.rs index 95ecdbd..2c48ab9 100644 --- a/src/commands/start/foc_deploy/helpers.rs +++ b/src/commands/start/foc_deploy/helpers.rs @@ -4,12 +4,14 @@ //! including repository path resolution and deployment checks. use crate::config::{Config, Location}; +use crate::constants::LOCAL_NETWORK_CHAIN_ID; use crate::docker::containers::lotus_container_name; use crate::docker::core::container_is_running; -use crate::paths::{foc_localnet_config, foc_localnet_filecoin_services_repo}; +use crate::paths::{foc_devnet_config, foc_devnet_filecoin_services_repo}; use std::error::Error; use std::fs; use std::path::PathBuf; +use tracing::info; /// Get the filecoin-services repository path based on configuration /// @@ -17,7 +19,7 @@ use std::path::PathBuf; /// The path to the filecoin-services repository pub fn get_filecoin_services_repo_path() -> Result> { // Load configuration - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); let config_content = fs::read_to_string(&config_path) .map_err(|e| format!("Failed to read config file at {:?}: {}", config_path, e))?; let config: Config = toml::from_str(&config_content) @@ -30,8 +32,8 @@ pub fn get_filecoin_services_repo_path() -> Result> { PathBuf::from(dir) } _ => { - // For Git-based locations, use the foc-localnet directory - foc_localnet_filecoin_services_repo() + // For Git-based locations, use the foc-devnet directory + foc_devnet_filecoin_services_repo() } }; @@ -93,3 +95,39 @@ pub fn check_required_addresses( global_faucet.clone(), )) } + +/// Clear cached devnet deployment addresses from deployments.json +/// +/// The filecoin-services deployment script reads deployments.json and skips +/// deployment if addresses already exist for the chain ID. This causes issues +/// when starting a fresh devnet because the cached addresses point to contracts +/// that don't exist on the new chain. This function removes the devnet entry +/// so that contracts are actually deployed. 
+pub fn clear_cached_devnet_deployments() -> Result<(), Box> { + let services_repo = get_filecoin_services_repo_path()?; + let deployments_file = services_repo.join("service_contracts/deployments.json"); + + if !deployments_file.exists() { + info!("No deployments.json found, skipping cache clear"); + return Ok(()); + } + + let content = fs::read_to_string(&deployments_file)?; + let mut deployments: serde_json::Value = serde_json::from_str(&content)?; + + let chain_id_str = LOCAL_NETWORK_CHAIN_ID.to_string(); + if let Some(obj) = deployments.as_object_mut() { + if obj.remove(&chain_id_str).is_some() { + info!( + "Cleared cached devnet (chain {}) addresses from deployments.json", + chain_id_str + ); + let updated = serde_json::to_string_pretty(&deployments)?; + fs::write(&deployments_file, updated)?; + } else { + info!("No cached devnet addresses found in deployments.json"); + } + } + + Ok(()) +} diff --git a/src/commands/start/foc_deployer/mod.rs b/src/commands/start/foc_deployer/mod.rs index 7028b60..1df6ac3 100644 --- a/src/commands/start/foc_deployer/mod.rs +++ b/src/commands/start/foc_deployer/mod.rs @@ -7,7 +7,7 @@ use super::foc_metadata::FOCMetadata; use crate::constants::*; use crate::docker::core::docker_command; -use crate::paths::{foc_localnet_bin, foc_localnet_docker_volumes_cache}; +use crate::paths::{foc_devnet_bin, foc_devnet_docker_volumes_cache}; use std::error::Error; use tracing::{info, warn}; @@ -52,6 +52,9 @@ pub fn deploy_foc_contracts( ) -> Result> { info!("Running deploy-all-warm-storage.sh..."); + // Log the RPC URL for debugging + info!("Lotus RPC URL: {}", lotus_rpc_url); + // Resolve symlinks to get the real path for Docker mounting let services_repo = services_repo_path .canonicalize() @@ -65,8 +68,9 @@ pub fn deploy_foc_contracts( return Err(format!("Deployment script not found at {}", deploy_script.display()).into()); } - let bin_dir = foc_localnet_bin(); - let builder_volumes_dir = foc_localnet_docker_volumes_cache().join("foc-builder"); + let bin_dir = foc_devnet_bin(); + let builder_volumes_dir = + foc_devnet_docker_volumes_cache().join(crate::constants::BUILDER_CONTAINER); // Get the private key from lotus for the deployer address let private_key = get_private_key(foc_deployer, lotus_container)?; @@ -75,7 +79,7 @@ pub fn deploy_foc_contracts( let env_vars = [ ("ETH_RPC_URL", lotus_rpc_url.to_string()), ("USDFC_TOKEN_ADDRESS", mock_usdfc_address.to_string()), - ("SERVICE_NAME", "FOC LocalNet Warm Storage".to_string()), + ("SERVICE_NAME", "FOC DevNet Warm Storage".to_string()), ( "SERVICE_DESCRIPTION", "Warm storage service for FOC local development network".to_string(), @@ -93,9 +97,11 @@ pub fn deploy_foc_contracts( ]; // Run the deployment script - // First, create a keystore from the private key with empty password + // Import wallet into keystore first (required by deploy-all-warm-storage.sh) + // The script uses forge create --password which requires a keystore file let deploy_cmd = format!( r#"set -e +mkdir -p /home/foc-user/.foundry/keystores cast wallet import foc-deployer --private-key {} --unsafe-password '' cd /service_contracts bash /service_contracts/tools/deploy-all-warm-storage.sh 2>&1 | tee /tmp/foc-deploy.log"#, @@ -104,13 +110,14 @@ bash /service_contracts/tools/deploy-all-warm-storage.sh 2>&1 | tee /tmp/foc-dep info!("This may take several minutes..."); + let container_name = format!("foc-{}-foc-deploy", run_id); + let mut docker_args = vec![ "run".to_string(), - "--rm".to_string(), "-u".to_string(), "foc-user".to_string(), 
"--name".to_string(), - format!("foc-{}-foc-deploy", run_id), + container_name.clone(), "--network".to_string(), "host".to_string(), ]; @@ -133,22 +140,75 @@ bash /service_contracts/tools/deploy-all-warm-storage.sh 2>&1 | tee /tmp/foc-dep docker_args.push(format!("{}:/service_contracts", contracts_dir.display())); // Add image and command - docker_args.push(BUILDER_CONTAINER.to_string()); + docker_args.push(BUILDER_DOCKER_IMAGE.to_string()); docker_args.push("/bin/bash".to_string()); docker_args.push("-c".to_string()); docker_args.push(deploy_cmd); let args_ref: Vec<&str> = docker_args.iter().map(|s| s.as_str()).collect(); + + info!("Executing deployment container: {}", container_name); + info!( + "Docker command: docker run [args with {} total args]", + args_ref.len() + ); + + // Log the command line length for debugging + let total_cmd_len: usize = args_ref.iter().map(|s| s.len()).sum(); + info!("Total command line length: {} bytes", total_cmd_len); + let output = docker_command(&args_ref)?; let output_str = String::from_utf8_lossy(&output.stdout); if !output.status.success() { - warn!("Deployment script failed"); + warn!( + "Deployment container failed with exit status: {:?}", + output.status.code() + ); + + // Print stderr let stderr_str = String::from_utf8_lossy(&output.stderr); - for line in stderr_str.lines() { - warn!("{}", line); + if !stderr_str.is_empty() { + warn!("=== STDERR ==="); + for line in stderr_str.lines() { + warn!("{}", line); + } + } + + // Print stdout as well + if !output_str.is_empty() { + warn!("=== STDOUT ==="); + for line in output_str.lines() { + warn!("{}", line); + } + } + + // Try to get container logs if the container still exists + if let Ok(logs) = crate::docker::core::get_container_logs(&container_name) { + if !logs.is_empty() { + warn!("=== CONTAINER LOGS ==="); + for line in logs.lines() { + warn!("{}", line); + } + } + } else { + info!( + "Container {} does not exist or logs not accessible", + container_name + ); } + + // Also try to inspect the container for more info + let inspect_output = crate::docker::core::docker_command(&["inspect", &container_name]); + if let Ok(output) = inspect_output { + let inspect_str = String::from_utf8_lossy(&output.stdout); + warn!("=== CONTAINER INSPECT ==="); + for line in inspect_str.lines() { + warn!("{}", line); + } + } + return Err("FOC contract deployment failed".into()); } @@ -164,7 +224,7 @@ pub fn parse_deployment_output(output_str: &str) -> Result Result Result<(), Box> { - let genesis_dir = foc_localnet_genesis(run_id); + let genesis_dir = foc_devnet_genesis(run_id); let genesis_file_path = genesis_dir.join(super::constants::GENESIS_FILE); // Check if genesis file already exists - if so, skip all construction diff --git a/src/commands/start/genesis/construction/accounts.rs b/src/commands/start/genesis/construction/accounts.rs index fe21ccb..a899a6b 100644 --- a/src/commands/start/genesis/construction/accounts.rs +++ b/src/commands/start/genesis/construction/accounts.rs @@ -5,7 +5,7 @@ use crate::commands::init::keys::load_keys; use crate::commands::start::genesis::constants; use crate::commands::start::genesis::keys::get_bls_addresses; -use crate::paths::foc_localnet_genesis; +use crate::paths::foc_devnet_genesis; use std::fs; use tracing::info; @@ -19,7 +19,7 @@ pub const PREFUNDED_ACCOUNTS_INIT_FIL: u64 = 10_000_000; // 10 million FIL pub fn add_global_fil_faucet_account(run_id: &str) -> Result<(), Box> { info!("💰 Adding pre-funded accounts to genesis..."); - let genesis_dir = 
foc_localnet_genesis(run_id); + let genesis_dir = foc_devnet_genesis(run_id); let genesis_file_path = genesis_dir.join(constants::GENESIS_FILE); // Get GLOBAL_FIL_FAUCET BLS address @@ -61,7 +61,7 @@ pub fn add_global_fil_faucet_account(run_id: &str) -> Result<(), Box Result<(), Box> { info!("💰 Adding FOC accounts to genesis..."); - let genesis_dir = foc_localnet_genesis(run_id); + let genesis_dir = foc_devnet_genesis(run_id); let genesis_file_path = genesis_dir.join(constants::GENESIS_FILE); // Load keys diff --git a/src/commands/start/genesis/construction/creation.rs b/src/commands/start/genesis/construction/creation.rs index f18603e..b7aacc0 100644 --- a/src/commands/start/genesis/construction/creation.rs +++ b/src/commands/start/genesis/construction/creation.rs @@ -3,7 +3,7 @@ //! This module handles creating the initial genesis file using lotus-seed. use crate::commands::start::genesis::constants; -use crate::paths::{foc_localnet_bin, foc_localnet_docker_volumes_cache, foc_localnet_genesis}; +use crate::paths::{foc_devnet_bin, foc_devnet_docker_volumes_cache, foc_devnet_genesis}; use std::fs; use std::process::Command; use tracing::info; @@ -16,7 +16,7 @@ use tracing::info; /// Note: This function assumes the genesis file does not already exist. /// The caller should check for existence first. pub fn create_genesis_file(run_id: &str) -> Result<(), Box> { - let genesis_dir = foc_localnet_genesis(run_id); + let genesis_dir = foc_devnet_genesis(run_id); info!("📜 Creating genesis file..."); @@ -30,13 +30,13 @@ pub fn create_genesis_file(run_id: &str) -> Result<(), Box Result<(), Box = vec![( constants::LOTUS_MINER_ID.to_string(), - foc_localnet_genesis_sectors_lotus_miner(run_id), + foc_devnet_genesis_sectors_lotus_miner(run_id), )]; // Add PDP SP miners for i in 1..=active_pdp_sp_count { let miner_id = format!("t0{}", constants::PDP_SP_MINER_ID_START + (i as u32) - 1); - let miner_dir = foc_localnet_genesis_sectors_pdp_sp(run_id, i); + let miner_dir = foc_devnet_genesis_sectors_pdp_sp(run_id, i); miner_configs.push((miner_id, miner_dir)); } @@ -56,7 +56,7 @@ pub fn add_miner_to_genesis( /// Add a single miner to the genesis file. 
fn add_single_miner_to_genesis( miner_id: &str, - miner_dir: &PathBuf, + miner_dir: &Path, run_id: &str, ) -> Result<(), Box> { info!("⛏ Adding miner {} to genesis...", miner_id,); @@ -73,14 +73,14 @@ fn add_single_miner_to_genesis( } // Run lotus-seed genesis add-miner in builder container - let genesis_dir = foc_localnet_genesis(run_id); - let bin_dir = foc_localnet_bin(); - let builder_volumes_dir = foc_localnet_docker_volumes_cache().join("foc-builder"); + let genesis_dir = foc_devnet_genesis(run_id); + let bin_dir = foc_devnet_bin(); + let builder_volumes_dir = + foc_devnet_docker_volumes_cache().join(crate::constants::BUILDER_CONTAINER); // Build docker args with network environment variables let mut docker_args = vec![ "run".to_string(), - "--rm".to_string(), "-u".to_string(), "foc-user".to_string(), "--name".to_string(), @@ -97,7 +97,7 @@ fn add_single_miner_to_genesis( format!("{}:/genesis", genesis_dir.display()), "-v".to_string(), format!("{}:/home/foc-user/.genesis-sectors", miner_dir.display()), - "foc-builder".to_string(), + crate::constants::BUILDER_DOCKER_IMAGE.to_string(), "/bin/bash".to_string(), "-c".to_string(), format!( diff --git a/src/commands/start/genesis/construction/signers.rs b/src/commands/start/genesis/construction/signers.rs index 5706e70..450c6aa 100644 --- a/src/commands/start/genesis/construction/signers.rs +++ b/src/commands/start/genesis/construction/signers.rs @@ -5,8 +5,7 @@ use crate::commands::start::genesis::constants; use crate::commands::start::genesis::keys::get_bls_addresses; use crate::paths::{ - foc_localnet_bin, foc_localnet_docker_volumes_cache, foc_localnet_genesis, - foc_localnet_lotus_keys, + foc_devnet_bin, foc_devnet_docker_volumes_cache, foc_devnet_genesis, foc_devnet_lotus_keys, }; use std::process::Command; use tracing::info; @@ -18,8 +17,8 @@ use tracing::info; pub fn add_signers_to_genesis(run_id: &str) -> Result<(), Box> { info!("🔑 Adding signers to genesis..."); - let genesis_dir = foc_localnet_genesis(run_id); - let keys_dir = foc_localnet_lotus_keys(run_id); + let genesis_dir = foc_devnet_genesis(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); // Get signer BLS addresses let addresses = get_bls_addresses( @@ -49,13 +48,13 @@ pub fn add_signers_to_genesis(run_id: &str) -> Result<(), Box Result<(), Box Result<(), Box> { use crate::commands::init::keys::load_keys; - let keys_dir = foc_localnet_lotus_keys(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); let all_keys = load_keys()?; // Filter BLS keys @@ -123,7 +123,7 @@ pub fn get_bls_addresses( count: usize, run_id: &str, ) -> Result, Box> { - let keys_dir = foc_localnet_lotus_keys(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); let mut keys_subdirs = Vec::with_capacity(count); let mut addresses = Vec::with_capacity(count); diff --git a/src/commands/start/genesis/mod.rs b/src/commands/start/genesis/mod.rs index 045d8b7..53c33da 100644 --- a/src/commands/start/genesis/mod.rs +++ b/src/commands/start/genesis/mod.rs @@ -1,12 +1,12 @@ -//! Genesis preparation module for foc-localnet. +//! Genesis preparation module for foc-devnet. //! -//! This module handles one-time setup tasks required before starting the localnet: +//! This module handles one-time setup tasks required before starting the devnet: //! - Downloading Filecoin proof parameters //! - Generating BLS keys for lotus //! - Pre-sealing sectors for genesis block //! -//! These operations are performed using the foc-builder container and their -//! 
outputs are cached for reuse across localnet restarts. +//! These operations are performed using the BUILDER_CONTAINER container and their +//! outputs are cached for reuse across devnet restarts. use tracing::info; diff --git a/src/commands/start/genesis/proof_parameters.rs b/src/commands/start/genesis/proof_parameters.rs index 54fce1a..bb1bcf7 100644 --- a/src/commands/start/genesis/proof_parameters.rs +++ b/src/commands/start/genesis/proof_parameters.rs @@ -4,131 +4,287 @@ //! required for lotus operations. use crate::paths::{ - foc_localnet_bin, foc_localnet_docker_volumes, foc_localnet_proof_parameters, + foc_devnet_bin, foc_devnet_docker_volumes, foc_devnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, }; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; use indicatif::{ProgressBar, ProgressStyle}; use std::fs; use std::process::{Command, Stdio}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Duration, Instant}; -use tracing::info; +use tracing::{info, warn}; + +/// S3 URL for pre-packaged Filecoin proof parameters (2KiB sectors) +const PROOF_PARAMS_S3_URL: &str = + "https://fil-proof-params-2k-cache.s3.us-east-2.amazonaws.com/filecoin-proof-params-2k.tar"; /// Ensure Filecoin proof parameters are downloaded. /// -/// Parameters are downloaded once and cached in ~/.foc-localnet/artifacts/filecoin-proof-parameters/ +/// Parameters are downloaded once and cached in ~/.foc-devnet/artifacts/filecoin-proof-parameters/ /// This directory is mounted into lotus containers at /var/tmp/filecoin-proof-parameters/ pub fn ensure_proof_parameters() -> Result<(), Box<dyn std::error::Error>> { - let params_dir = foc_localnet_proof_parameters(); - - // Check if parameters already exist and are valid - if params_dir.exists() && validate_proof_parameters(&params_dir)?
{ - info!("✓ Proof parameters already exist locally",); + let params_dir = foc_devnet_proof_parameters(); + + // Check if parameters already exist + if params_dir.exists() && params_dir.read_dir()?.next().is_some() { + info!( + "✓ Proof parameters already exist at: {}", + params_dir.display() + ); return Ok(()); } + info!( + "Proof parameters directory does not exist: {}", + params_dir.display() + ); + info!("⬇ Downloading proof parameters (this may take a while)..."); - // Create the directory - fs::create_dir_all(&params_dir)?; + // Try primary method: lotus fetch-params + let primary_result = download_via_lotus_fetch_params(&params_dir); + + match primary_result { + Ok(_) => { + info!("✓ Proof parameters downloaded successfully via lotus fetch-params"); + return Ok(()); + } + Err(e) => { + warn!("Primary download method (lotus fetch-params) failed: {}", e); + warn!("Falling back to S3 tarball download..."); + } + } - // Run lotus fetch-params in builder container - let bin_dir = foc_localnet_bin(); - let builder_volumes_dir = foc_localnet_docker_volumes().join("builder"); + // Fallback: Download from S3 + download_from_s3(&params_dir)?; - // Create a progress bar - let pb = ProgressBar::new_spinner(); - pb.set_style( - ProgressStyle::default_spinner() - .template("{spinner:.cyan} {msg}") - .unwrap() - .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ "), - ); + info!("✓ Proof parameters downloaded successfully via S3 fallback"); + Ok(()) +} - let bytes_downloaded = Arc::new(Mutex::new(0u64)); - let start_time = Instant::now(); - let bytes_clone = Arc::clone(&bytes_downloaded); - let params_dir_clone = params_dir.clone(); - - // Spawn a thread to update progress by monitoring directory size - let pb_clone = pb.clone(); - let update_handle = thread::spawn(move || { - loop { - thread::sleep(Duration::from_millis(500)); - - // Calculate directory size - if let Ok(size) = get_dir_size(&params_dir_clone) { - let mut total = bytes_clone.lock().unwrap(); - *total = size; - - let elapsed = start_time.elapsed().as_secs_f64(); - if elapsed > 0.0 { - let speed_mbps = (size as f64 / 1_048_576.0) / elapsed; - let total_mb = size as f64 / 1_048_576.0; - pb_clone.set_message(format!( - "Downloaded {:.1} MB ({:.2} MB/s)", - total_mb, speed_mbps - )); +/// Download proof parameters using lotus fetch-params. +/// +/// This is the primary download method that uses the lotus binary's +/// built-in parameter fetching functionality.
+fn download_via_lotus_fetch_params( + params_dir: &std::path::Path, +) -> Result<(), Box<dyn std::error::Error>> { + // Retry the download operation in case of network issues + retry_with_fixed_delay( + || { + // Ensure directory exists for each attempt (in case cleanup removed it) + fs::create_dir_all(params_dir)?; + + // Run lotus fetch-params in builder container + let bin_dir = foc_devnet_bin(); + let builder_volumes_dir = foc_devnet_docker_volumes().join("builder"); + + // Create a progress bar + let pb = ProgressBar::new_spinner(); + pb.set_style( + ProgressStyle::default_spinner() + .template("{spinner:.cyan} {msg}") + .unwrap() + .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ "), + ); + + let bytes_downloaded = Arc::new(Mutex::new(0u64)); + let start_time = Instant::now(); + let bytes_clone = Arc::clone(&bytes_downloaded); + let params_dir_clone = params_dir.to_path_buf(); + + // Spawn a thread to update progress by monitoring directory size + let pb_clone = pb.clone(); + let update_handle = thread::spawn(move || { + loop { + thread::sleep(Duration::from_millis(500)); + + // Calculate directory size + if let Ok(size) = get_dir_size(&params_dir_clone) { + let mut total = bytes_clone.lock().unwrap(); + *total = size; + + let elapsed = start_time.elapsed().as_secs_f64(); + if elapsed > 0.0 { + let speed_mbps = (size as f64 / 1_048_576.0) / elapsed; + let total_mb = size as f64 / 1_048_576.0; + pb_clone.set_message(format!( + "Downloaded {:.1} MB ({:.2} MB/s)", + total_mb, speed_mbps + )); + } + } + + if !pb_clone.is_finished() { + pb_clone.tick(); + } else { + break; + } + } + }); + + let container_name = format!( + "foc-proof-params-fetch-{}", + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH)? + .as_secs() + ); + let child = Command::new("docker") + .args([ + "run", + "--name", + &container_name, + "-e", + &format!( + "FIL_PROOFS_PARAMETER_CACHE={}", + CONTAINER_FILECOIN_PROOF_PARAMS_PATH + ), + "-v", + &format!("{}:/output", bin_dir.display()), + "-v", + &format!( + "{}:/home/foc-user/.cargo", + builder_volumes_dir.join("cargo").display() + ), + "-v", + &format!( + "{}:{}", + params_dir.display(), + CONTAINER_FILECOIN_PROOF_PARAMS_PATH + ), + crate::constants::BUILDER_DOCKER_IMAGE, + "/bin/bash", + "-c", + &format!( + "/output/lotus fetch-params {}", + super::constants::PROOF_PARAMS_SECTOR_SIZE + ), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn()?; + + let output = child.wait_with_output()?; + + pb.finish_and_clear(); + drop(update_handle); + + if !output.status.success() { + // Clean up partial download on failure + if params_dir.exists() { + if let Err(cleanup_err) = fs::remove_dir_all(params_dir) { + warn!( + "Failed to clean up partial proof parameters download: {}", + cleanup_err + ); + } } + return Err(format!( + "Failed to download proof parameters: {}", + String::from_utf8_lossy(&output.stderr) + ) + .into()); } - if !pb_clone.is_finished() { - pb_clone.tick(); - } else { - break; + Ok(()) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + "Proof parameters download", + ) +} + +/// Download proof parameters from S3 as a fallback. +/// +/// This method downloads a pre-packaged tarball of proof parameters from S3, +/// extracts it, and places the files in the correct location.
+fn download_from_s3(params_dir: &std::path::Path) -> Result<(), Box<dyn std::error::Error>> { + let tarball_path = std::env::temp_dir().join("filecoin-proof-params-2k.tar"); + + // Ensure params directory exists + fs::create_dir_all(params_dir)?; + + // Download tarball with retry + retry_with_fixed_delay( + || { + info!("Downloading proof parameters tarball from S3..."); + + let pb = ProgressBar::new_spinner(); + pb.set_style( + ProgressStyle::default_spinner() + .template("{spinner:.cyan} {msg}") + .unwrap() + .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ "), + ); + pb.set_message("Downloading tarball from S3..."); + + let output = Command::new("curl") + .args([ + "-L", + PROOF_PARAMS_S3_URL, + "-o", + &tarball_path.to_string_lossy(), + ]) + .output()?; + + pb.finish_and_clear(); + + if !output.status.success() { + // Clean up failed download + if tarball_path.exists() { + let _ = fs::remove_file(&tarball_path); + } + return Err(format!( + "Failed to download tarball from S3: {}", + String::from_utf8_lossy(&output.stderr) + ) + .into()); } - } - }); - let child = Command::new("docker") + Ok(()) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + "S3 tarball download", + )?; + + // Extract tarball + info!("Extracting proof parameters tarball..."); + let extract_output = Command::new("tar") .args([ - "run", - "--rm", - "-e", - &format!( - "FIL_PROOFS_PARAMETER_CACHE={}", - CONTAINER_FILECOIN_PROOF_PARAMS_PATH - ), - "-v", - &format!("{}:/output", bin_dir.display()), - "-v", - &format!( - "{}:/home/foc-user/.cargo", - builder_volumes_dir.join("cargo").display() - ), - "-v", - &format!( - "{}:{}", - params_dir.display(), - CONTAINER_FILECOIN_PROOF_PARAMS_PATH - ), - "foc-builder", - "/bin/bash", - "-c", - &format!( - "/output/lotus fetch-params {}", - super::constants::PROOF_PARAMS_SECTOR_SIZE - ), + "-xf", + &tarball_path.to_string_lossy(), + "-C", + &params_dir.to_string_lossy(), ]) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .spawn()?; + .output()?; - let output = child.wait_with_output()?; - - pb.finish_and_clear(); - drop(update_handle); - - if !output.status.success() { + if !extract_output.status.success() { + // Clean up on extraction failure + let _ = fs::remove_file(&tarball_path); + if params_dir.exists() { + let _ = fs::remove_dir_all(params_dir); + } return Err(format!( - "Failed to download proof parameters: {}", - String::from_utf8_lossy(&output.stderr) + "Failed to extract tarball: {}", + String::from_utf8_lossy(&extract_output.stderr) ) .into()); } - info!("✓ Proof parameters downloaded successfully"); + // Clean up tarball after successful extraction + if tarball_path.exists() { + fs::remove_file(&tarball_path)?; + } + + // Verify extraction succeeded by checking for files + if !params_dir.exists() || params_dir.read_dir()?.next().is_none() { + return Err("Tarball extraction produced no files".into()); + } + + info!("Proof parameters extracted successfully"); Ok(()) } @@ -151,73 +307,3 @@ fn get_dir_size(path: &std::path::Path) -> std::io::Result<u64> { Ok(total_size) } - -/// Validate that proof parameters directory contains expected files.
-/// -/// This performs a heuristic validation without requiring exact file matches: -/// - Checks for at least one large .params file (> 10MB) -/// - Checks for at least one .srs file (> 100MB) -/// - Checks for multiple .vk (verification key) files (>= 5) -/// - Verifies files follow v28- naming convention -/// - Ensures total directory size is reasonable (> 1GB) -fn validate_proof_parameters( - params_dir: &std::path::Path, -) -> Result> { - if !params_dir.exists() || !params_dir.is_dir() { - return Ok(false); - } - - let entries: Vec<_> = fs::read_dir(params_dir)?.filter_map(|e| e.ok()).collect(); - - if entries.is_empty() { - return Ok(false); - } - - let mut has_large_params = false; - let mut has_srs_file = false; - let mut vk_count = 0; - let mut total_size = 0u64; - - for entry in entries { - let path = entry.path(); - let metadata = entry.metadata()?; - - if !metadata.is_file() { - continue; - } - - let file_name = path.file_name().and_then(|n| n.to_str()).unwrap_or(""); - - let file_size = metadata.len(); - total_size += file_size; - - // Verify v28- naming convention - if !file_name.starts_with("v28-") { - continue; - } - - // Check for .params files (should be large, > 10MB) - if file_name.ends_with(".params") && file_size > 10_000_000 { - has_large_params = true; - } - - // Check for .srs file (should be substantial, > 100MB) - if file_name.ends_with(".srs") && file_size > 100_000_000 { - has_srs_file = true; - } - - // Count .vk (verification key) files - if file_name.ends_with(".vk") { - vk_count += 1; - } - } - - // Validation criteria (very crude): - // - At least one large .params file - // - At least one .srs file - // - At least 5 .vk files - // - Total directory size > 1GB - let is_valid = has_large_params && has_srs_file && vk_count >= 5 && total_size > 1_000_000_000; - - Ok(is_valid) -} diff --git a/src/commands/start/genesis/sectors.rs b/src/commands/start/genesis/sectors.rs index a12ec69..9295ab3 100644 --- a/src/commands/start/genesis/sectors.rs +++ b/src/commands/start/genesis/sectors.rs @@ -3,9 +3,9 @@ //! This module handles pre-sealing sectors required for the genesis miners. 
use crate::paths::{ - foc_localnet_bin, foc_localnet_docker_volumes_cache, foc_localnet_genesis, - foc_localnet_genesis_sectors, foc_localnet_genesis_sectors_lotus_miner, - foc_localnet_genesis_sectors_pdp_sp, + foc_devnet_bin, foc_devnet_docker_volumes_cache, foc_devnet_genesis, + foc_devnet_genesis_sectors, foc_devnet_genesis_sectors_lotus_miner, + foc_devnet_genesis_sectors_pdp_sp, }; use std::fs; use std::path::PathBuf; @@ -27,15 +27,15 @@ pub fn ensure_presealed_sectors( active_pdp_sp_count: usize, run_id: &str, ) -> Result<(), Box> { - let sectors_dir = foc_localnet_genesis_sectors(run_id); - let genesis_dir = foc_localnet_genesis(run_id); + let sectors_dir = foc_devnet_genesis_sectors(run_id); + let genesis_dir = foc_devnet_genesis(run_id); // Build list of all miner directories to check: lotus-miner + PDP SPs - let mut miner_dirs = vec![foc_localnet_genesis_sectors_lotus_miner(run_id)]; + let mut miner_dirs = vec![foc_devnet_genesis_sectors_lotus_miner(run_id)]; // Add PDP SP directories for i in 1..=active_pdp_sp_count { - miner_dirs.push(foc_localnet_genesis_sectors_pdp_sp(run_id, i)); + miner_dirs.push(foc_devnet_genesis_sectors_pdp_sp(run_id, i)); } // Check if sectors already exist for all miners @@ -71,7 +71,7 @@ pub fn ensure_presealed_sectors( // Pre-seal sectors for lotus-miner let mut miner_configs: Vec<(String, PathBuf)> = vec![( super::constants::LOTUS_MINER_ID.to_string(), - foc_localnet_genesis_sectors_lotus_miner(run_id), + foc_devnet_genesis_sectors_lotus_miner(run_id), )]; // Add PDP SP miners @@ -80,7 +80,7 @@ pub fn ensure_presealed_sectors( "t0{}", super::constants::PDP_SP_MINER_ID_START + (i as u32) - 1 ); - let miner_dir = foc_localnet_genesis_sectors_pdp_sp(run_id, i); + let miner_dir = foc_devnet_genesis_sectors_pdp_sp(run_id, i); miner_configs.push((miner_id, miner_dir)); } @@ -107,13 +107,13 @@ fn preseal_miner_sectors( fs::create_dir_all(miner_dir)?; // Run lotus-seed pre-seal in builder container - let bin_dir = foc_localnet_bin(); - let builder_volumes_dir = foc_localnet_docker_volumes_cache().join("foc-builder"); + let bin_dir = foc_devnet_bin(); + let builder_volumes_dir = + foc_devnet_docker_volumes_cache().join(crate::constants::BUILDER_CONTAINER); // Build docker args with network environment variables let mut docker_args = vec![ "run".to_string(), - "--rm".to_string(), "-u".to_string(), "foc-user".to_string(), "--name".to_string(), @@ -131,7 +131,7 @@ fn preseal_miner_sectors( ), "-v".to_string(), format!("{}:/home/foc-user/.genesis-sectors", miner_dir.display()), - "foc-builder".to_string(), + crate::constants::BUILDER_DOCKER_IMAGE.to_string(), "/bin/bash".to_string(), "-c".to_string(), format!( diff --git a/src/commands/start/lotus/prerequisites.rs b/src/commands/start/lotus/prerequisites.rs index 6a98c37..c2bf1f0 100644 --- a/src/commands/start/lotus/prerequisites.rs +++ b/src/commands/start/lotus/prerequisites.rs @@ -4,18 +4,16 @@ //! before starting the Lotus daemon container. 
use super::super::genesis::constants::GENESIS_FILE; +use crate::constants::LOTUS_DOCKER_IMAGE; use crate::paths::{ - foc_localnet_bin, foc_localnet_genesis, foc_localnet_genesis_sectors, - foc_localnet_proof_parameters, + foc_devnet_bin, foc_devnet_genesis, foc_devnet_genesis_sectors, foc_devnet_proof_parameters, }; use std::error::Error; use tracing::info; -const IMAGE_NAME: &str = "foc-lotus"; - /// Verify that the genesis block file exists pub fn verify_genesis_file(run_id: &str) -> Result> { - let genesis_dir = foc_localnet_genesis(run_id); + let genesis_dir = foc_devnet_genesis(run_id); let genesis_file = genesis_dir.join(GENESIS_FILE); if !genesis_file.exists() { @@ -31,19 +29,19 @@ pub fn verify_genesis_file(run_id: &str) -> Result Result<(), Box> { // Verify Docker image exists - if !crate::docker::core::image_exists(IMAGE_NAME).unwrap_or(true) { + if !crate::docker::core::image_exists(LOTUS_DOCKER_IMAGE).unwrap_or(true) { return Err(format!( - "Docker image '{}' not found. Please run 'foc-localnet init' to build the image.", - IMAGE_NAME + "Docker image '{}' not found. Please run 'foc-devnet init' to build the image.", + LOTUS_DOCKER_IMAGE ) .into()); } - info!("✓ Docker image '{}' found", IMAGE_NAME); + info!("✓ Docker image '{}' found", LOTUS_DOCKER_IMAGE); // Verify lotus binary exists - let lotus_bin = foc_localnet_bin().join("lotus"); + let lotus_bin = foc_devnet_bin().join("lotus"); if !lotus_bin.exists() { - return Err("Lotus binary not found. Please run 'foc-localnet build lotus' first.".into()); + return Err("Lotus binary not found. Please run 'foc-devnet build lotus' first.".into()); } info!("✓ Lotus binary found"); @@ -57,7 +55,7 @@ pub fn check_genesis_and_params(run_id: &str) -> Result<(), Box> { info!("✓ Genesis file found at {}", genesis_file.display()); // Verify proof parameters exist - let params_dir = foc_localnet_proof_parameters(); + let params_dir = foc_devnet_proof_parameters(); if !params_dir.exists() || params_dir.read_dir()?.next().is_none() { return Err( "Filecoin proof parameters not found. They should have been downloaded during genesis preparation.".into(), @@ -67,7 +65,7 @@ pub fn check_genesis_and_params(run_id: &str) -> Result<(), Box> { info!("✓ Proof parameters found"); // Verify pre-sealed sectors exist - let sectors_dir = foc_localnet_genesis_sectors(run_id); + let sectors_dir = foc_devnet_genesis_sectors(run_id); if !sectors_dir.exists() || sectors_dir.read_dir()?.next().is_none() { return Err( "Pre-sealed sectors not found. They should have been created during genesis preparation.".into(), diff --git a/src/commands/start/lotus/setup.rs b/src/commands/start/lotus/setup.rs index 5c4a8ae..f2cf05d 100644 --- a/src/commands/start/lotus/setup.rs +++ b/src/commands/start/lotus/setup.rs @@ -4,13 +4,12 @@ //! for starting the Lotus daemon container. 
use super::super::step::SetupContext; +use crate::constants::LOTUS_DOCKER_IMAGE; use crate::docker::containers::lotus_container_name; use crate::docker::network::lotus_network_name; use std::error::Error; use std::fs; -use std::path::PathBuf; - -const IMAGE_NAME: &str = "foc-lotus"; +use std::path::{Path, PathBuf}; /// Enable FEVM in the Lotus config.toml /// @@ -44,7 +43,7 @@ pub fn create_fevm_config(lotus_data_dir: &PathBuf) -> Result<(), Box } /// Set up necessary directories for Lotus daemon -pub fn setup_directories(volumes_dir: &PathBuf) -> Result<(), Box> { +pub fn setup_directories(volumes_dir: &Path) -> Result<(), Box> { // Create lotus data directory in volumes let lotus_data_dir = volumes_dir.join("lotus-data"); fs::create_dir_all(&lotus_data_dir)?; @@ -61,14 +60,13 @@ pub fn setup_directories(volumes_dir: &PathBuf) -> Result<(), Box> { /// Build the Docker run command for starting Lotus daemon pub fn build_docker_command( - volumes_dir: &PathBuf, + volumes_dir: &Path, context: &SetupContext, ) -> Result, Box> { use super::super::genesis::constants::GENESIS_FILE; use crate::paths::{ - foc_localnet_bin, foc_localnet_genesis, foc_localnet_genesis_sectors, - foc_localnet_lotus_keys, foc_localnet_proof_parameters, - CONTAINER_FILECOIN_PROOF_PARAMS_PATH, + foc_devnet_bin, foc_devnet_genesis, foc_devnet_genesis_sectors, foc_devnet_lotus_keys, + foc_devnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, }; // Read allocated ports from context @@ -87,11 +85,11 @@ pub fn build_docker_command( let network_name = lotus_network_name(run_id); // Get paths - let bin_dir = foc_localnet_bin(); - let params_dir = foc_localnet_proof_parameters(); - let genesis_dir = foc_localnet_genesis(run_id); - let sectors_dir = foc_localnet_genesis_sectors(run_id); - let keys_dir = foc_localnet_lotus_keys(run_id); + let bin_dir = foc_devnet_bin(); + let params_dir = foc_devnet_proof_parameters(); + let genesis_dir = foc_devnet_genesis(run_id); + let sectors_dir = foc_devnet_genesis_sectors(run_id); + let keys_dir = foc_devnet_lotus_keys(run_id); let genesis_file = genesis_dir.join(GENESIS_FILE); // Build docker run command @@ -139,7 +137,7 @@ pub fn build_docker_command( docker_args.extend_from_slice(&["-w".to_string(), "/data".to_string()]); // Add image name - docker_args.push(IMAGE_NAME.to_string()); + docker_args.push(LOTUS_DOCKER_IMAGE.to_string()); // Add command to start lotus daemon let genesis_filename = genesis_file diff --git a/src/commands/start/lotus/verification.rs b/src/commands/start/lotus/verification.rs index 4894319..a57534a 100644 --- a/src/commands/start/lotus/verification.rs +++ b/src/commands/start/lotus/verification.rs @@ -8,7 +8,7 @@ use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::lotus_container_name; use crate::docker::wait_for_port; use std::error::Error; -use std::path::PathBuf; +use std::path::Path; use std::thread; use std::time::Duration; use tracing::{info, warn}; @@ -83,7 +83,7 @@ pub fn verify_ports(context: &SetupContext) -> Result<(), Box> { } /// Wait for the Lotus API file to be created -pub fn wait_for_api_file(volumes_dir: &PathBuf) -> Result<(), Box> { +pub fn wait_for_api_file(volumes_dir: &Path) -> Result<(), Box> { // Wait for Lotus API file to exist and daemon to be fully initialized info!("Waiting for Lotus API to be ready (this may take 1-2 minutes)..."); let lotus_data_dir = volumes_dir.join("lotus-data"); diff --git a/src/commands/start/lotus_miner/constants.rs b/src/commands/start/lotus_miner/constants.rs index 
278bb73..9abb79c 100644 --- a/src/commands/start/lotus_miner/constants.rs +++ b/src/commands/start/lotus_miner/constants.rs @@ -2,11 +2,11 @@ //! //! This module contains all configuration constants used in the Lotus-Miner startup process. -pub const IMAGE_NAME: &str = "foc-lotus-miner"; +// Docker image constant +pub const IMAGE_NAME: &str = crate::constants::LOTUS_MINER_DOCKER_IMAGE; // Timing constants pub const LOTUS_API_WAIT_SLEEP_SECS: u64 = 2; pub const MINER_API_CHECK_DELAY_SECS: u64 = 5; -pub const TIPSET_CHECK_DELAY_SECS: u64 = 10; pub const PORT_WAIT_TIMEOUT_SECS: u64 = 45; pub const CONTAINER_ID_DISPLAY_LENGTH: usize = 12; diff --git a/src/commands/start/lotus_miner/docker_command.rs b/src/commands/start/lotus_miner/docker_command.rs index 15bf98d..4cefa29 100644 --- a/src/commands/start/lotus_miner/docker_command.rs +++ b/src/commands/start/lotus_miner/docker_command.rs @@ -3,7 +3,7 @@ //! This module provides utilities for building Docker run commands for Lotus-Miner. use std::error::Error; -use std::path::PathBuf; +use std::path::Path; use super::constants::{IMAGE_NAME, LOTUS_API_WAIT_SLEEP_SECS}; use crate::commands::start::lotus_utils::{build_fullnode_api_info, read_lotus_token}; @@ -11,13 +11,13 @@ use crate::commands::start::step::SetupContext; use crate::docker::containers::{lotus_container_name, lotus_miner_container_name}; use crate::docker::network::lotus_network_name; use crate::paths::{ - foc_localnet_bin, foc_localnet_docker_volumes_cache, foc_localnet_genesis_sectors_lotus_miner, - foc_localnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, + foc_devnet_bin, foc_devnet_docker_volumes_cache, foc_devnet_genesis_sectors_lotus_miner, + foc_devnet_proof_parameters, CONTAINER_FILECOIN_PROOF_PARAMS_PATH, }; /// Build the Docker run command for Lotus-Miner pub fn build_miner_docker_command( - volumes_dir: &PathBuf, + volumes_dir: &Path, preseal_files: &(String, String), context: &SetupContext, ) -> Result, Box> { @@ -35,10 +35,11 @@ pub fn build_miner_docker_command( let fullnode_api_info = build_fullnode_api_info(&lotus_token, &lotus_name); // Get paths - let bin_dir = foc_localnet_bin(); - let sectors_dir = foc_localnet_genesis_sectors_lotus_miner(run_id); - let builder_volumes_dir = foc_localnet_docker_volumes_cache().join("foc-builder"); - let params_dir = foc_localnet_proof_parameters(); + let bin_dir = foc_devnet_bin(); + let sectors_dir = foc_devnet_genesis_sectors_lotus_miner(run_id); + let builder_volumes_dir = + foc_devnet_docker_volumes_cache().join(crate::constants::BUILDER_CONTAINER); + let params_dir = foc_devnet_proof_parameters(); // Get allocated miner API port from context let miner_api_port: u16 = context @@ -114,7 +115,7 @@ pub fn build_miner_docker_command( echo "Lotus daemon API is ready!" && \ if [ ! -f $LOTUS_MINER_PATH/config.toml ]; then \ echo "Importing pre-sealed miner key..." && \ - (/usr/local/bin/lotus-bins/lotus wallet import --as-default /sectors/{} 2>&1 | grep -v "key already exists" || true) && \ + (/usr/local/bin/lotus-bins/lotus wallet import --as-default /sectors/{} 2>&1 | grep --invert-match "key already exists" || true) && \ echo "Initializing lotus-miner..." 
&& \ /usr/local/bin/lotus-bins/lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB \ --pre-sealed-sectors=/sectors --pre-sealed-metadata=/sectors/{} --nosync; \ diff --git a/src/commands/start/lotus_miner/setup.rs b/src/commands/start/lotus_miner/setup.rs index f977fdb..9ba2e4c 100644 --- a/src/commands/start/lotus_miner/setup.rs +++ b/src/commands/start/lotus_miner/setup.rs @@ -4,12 +4,12 @@ use std::error::Error; use std::fs; -use std::path::PathBuf; +use std::path::Path; -use crate::paths::foc_localnet_genesis_sectors_lotus_miner; +use crate::paths::foc_devnet_genesis_sectors_lotus_miner; /// Set up necessary directories for Lotus-Miner -pub fn setup_miner_directories(volumes_dir: &PathBuf) -> Result<(), Box> { +pub fn setup_miner_directories(volumes_dir: &Path) -> Result<(), Box> { // Create lotus-miner data directory in volumes let miner_data_dir = volumes_dir.join("lotus-miner-data"); fs::create_dir_all(&miner_data_dir)?; @@ -18,7 +18,7 @@ pub fn setup_miner_directories(volumes_dir: &PathBuf) -> Result<(), Box Result<(String, String), Box> { - let sectors_dir = foc_localnet_genesis_sectors_lotus_miner(run_id); + let sectors_dir = foc_devnet_genesis_sectors_lotus_miner(run_id); let preseal_file = "pre-seal-t01000.json"; let preseal_key_file = "pre-seal-t01000.key"; diff --git a/src/commands/start/lotus_miner/verification.rs b/src/commands/start/lotus_miner/verification.rs index 0bf74c6..1f45ab6 100644 --- a/src/commands/start/lotus_miner/verification.rs +++ b/src/commands/start/lotus_miner/verification.rs @@ -7,13 +7,12 @@ use std::thread; use std::time::Duration; use tracing::info; -use super::constants::{ - MINER_API_CHECK_DELAY_SECS, PORT_WAIT_TIMEOUT_SECS, TIPSET_CHECK_DELAY_SECS, -}; +use super::constants::{MINER_API_CHECK_DELAY_SECS, PORT_WAIT_TIMEOUT_SECS}; use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; use crate::docker::containers::{lotus_container_name, lotus_miner_container_name}; use crate::docker::wait_for_port; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; // Additional timing constant for API file wait const API_FILE_TIMEOUT_SECS: u64 = 120; @@ -87,45 +86,50 @@ pub fn check_tipset_generation(context: &SetupContext) -> Result<(), Box height1 { - info!("✓ Chain is progressing (height {} -> {})", height1, height2); - Ok(()) - } else { - Err(format!( - "Chain is not progressing. Height remained at {} after {} seconds.", - height1, TIPSET_CHECK_DELAY_SECS + retry_with_fixed_delay( + || { + // Get new chain height + let key = format!("lotus_miner_chain_height_check_{}", lotus_name); + let output2 = run_and_log_command( + "docker", + &[ + "exec", + &lotus_name, + "/usr/local/bin/lotus-bins/lotus", + "chain", + "list", + "--count=1", + ], + context, + &key, + )?; + + if !output2.status.success() { + return Err("Failed to get new chain height".into()); + } + + let chain_output2 = String::from_utf8_lossy(&output2.stdout); + let height2 = parse_chain_height(&chain_output2)?; + + if height2 > height1 { + info!("✓ Chain is progressing (height {} -> {})", height1, height2); + Ok(()) + } else { + Err(format!("Chain height still at {}", height1).into()) + } + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + "Chain progression verification", + ) + .map_err(|_| { + format!( + "Chain is not progressing. 
Height remained at {} after {} attempts.", + height1, DEFAULT_MAX_RETRIES, ) - .into()) - } + })?; + + Ok(()) } /// Perform all post-execution verifications for Lotus-Miner diff --git a/src/commands/start/lotus_utils/mod.rs b/src/commands/start/lotus_utils/mod.rs index 83665c4..dea2d3a 100644 --- a/src/commands/start/lotus_utils/mod.rs +++ b/src/commands/start/lotus_utils/mod.rs @@ -6,7 +6,7 @@ use std::error::Error; use std::fs; use super::step::SetupContext; -use crate::paths::foc_localnet_docker_volumes_run_specific; +use crate::paths::foc_devnet_docker_volumes_run_specific; /// Read the Lotus API token from the lotus-data directory. /// @@ -16,7 +16,7 @@ use crate::paths::foc_localnet_docker_volumes_run_specific; /// # Returns /// The token string if found, or an error if the token file doesn't exist or can't be read. pub fn read_lotus_token(run_id: &str) -> Result> { - let token_path = foc_localnet_docker_volumes_run_specific(run_id) + let token_path = foc_devnet_docker_volumes_run_specific(run_id) .join("lotus-data") .join("token"); diff --git a/src/commands/start/mod.rs b/src/commands/start/mod.rs index 8f747e3..634ca6e 100644 --- a/src/commands/start/mod.rs +++ b/src/commands/start/mod.rs @@ -32,11 +32,11 @@ use crate::commands::start::usdfc_funding::USDFCFundingStep; use crate::config::Config; use crate::docker::core::{container_is_running, remove_container, stop_container}; use crate::docker::{create_all_networks, start_portainer}; -use crate::paths::{foc_localnet_config, foc_localnet_run_dir}; -use crate::run_id::save_current_run_id; +use crate::paths::{foc_devnet_config, foc_devnet_run_dir}; +use crate::run_id::{create_latest_symlink, save_current_run_id}; use crate::version_info::write_version_file; pub use eth_acc_funding::constants::FEVM_ACCOUNTS_PREFUNDED; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use tracing::{info, warn}; /// Stop any existing cluster before starting a new one. @@ -62,11 +62,11 @@ fn setup_directories_and_run_id( let volumes_dir = if let Some(dir) = volumes_dir { PathBuf::from(dir) } else { - crate::paths::foc_localnet_docker_volumes_run_specific(&run_id) + crate::paths::foc_devnet_docker_volumes_run_specific(&run_id) }; // Determine run directory - let run_dir = foc_localnet_run_dir(&run_id); + let run_dir = foc_devnet_run_dir(&run_id); // Create directories if they don't exist std::fs::create_dir_all(&volumes_dir)?; @@ -76,16 +76,25 @@ fn setup_directories_and_run_id( let version_info = crate::version_info::VersionInfo::from_env(); write_version_file(&run_dir, &version_info)?; + // Create symlink from state/latest to this run directory for easier access + create_latest_symlink(&run_id)?; + Ok((volumes_dir, run_dir, run_id)) } -/// Perform a full regenesis reset, deleting all genesis-related files and keys. -fn perform_regenesis() -> Result<(), Box> { - info!("Performing regenesis (full reset)..."); - - // First, stop any running containers to ensure clean state - info!("Stopping any running containers..."); - let containers = vec!["foc-lotus-miner", "foc-lotus", "foc-curio", "foc-yugabyte"]; +/// Stop any running containers from previous runs. +/// +/// Note: We do NOT delete old run volumes or directories since each run has +/// a unique run ID. Old runs are preserved for historical reference and debugging. 
+fn stop_running_containers() -> Result<(), Box> { + info!("Stopping any running containers from previous runs..."); + + let containers = vec![ + crate::constants::LOTUS_MINER_CONTAINER, + crate::constants::LOTUS_CONTAINER, + crate::constants::CURIO_CONTAINER, + crate::constants::YUGABYTE_CONTAINER, + ]; for container in containers { if container_is_running(container)? { info!("Stopping container '{}'...", container); @@ -94,10 +103,22 @@ fn perform_regenesis() -> Result<(), Box> { } } - let run_specific_volumes_root = crate::paths::foc_localnet_docker_volumes_run_specific_root(); - let runs_dir = crate::paths::foc_localnet_runs(); + info!("All running containers stopped."); + Ok(()) +} + +/// Perform legacy full regenesis (deletes ALL runs - deprecated). +/// +/// This function is kept for backward compatibility but should not be used +/// since it defeats the purpose of run IDs. +#[allow(dead_code)] +fn perform_regenesis_legacy() -> Result<(), Box> { + warn!("Legacy regenesis called - this deletes ALL previous runs!"); + + let run_specific_volumes_root = crate::paths::foc_devnet_docker_volumes_run_specific_root(); + let runs_dir = crate::paths::foc_devnet_runs(); - // Files and directories to delete + // Files and directories to delete (ALL runs) let paths_to_delete = vec![run_specific_volumes_root, runs_dir]; for path in paths_to_delete { @@ -119,14 +140,13 @@ fn perform_regenesis() -> Result<(), Box> { let file_name = path.file_name().unwrap().to_string_lossy(); let status = std::process::Command::new("docker") - .args(&[ + .args([ "run", - "--rm", "-u", "root", "-v", &format!("{}:/work", parent.display()), - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "rm", "-rf", &format!("/work/{}", file_name), @@ -160,10 +180,10 @@ fn perform_regenesis() -> Result<(), Box> { /// Load and validate the configuration file. fn load_and_validate_config() -> Result> { // Load config to get port range settings - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); let config_content = std::fs::read_to_string(&config_path).map_err(|e| { format!( - "Failed to read config file at {:?}: {}. Run 'foc-localnet init' first.", + "Failed to read config file at {:?}: {}. Run 'foc-devnet init' first.", config_path, e ) })?; @@ -183,35 +203,39 @@ fn load_and_validate_config() -> Result> { /// Create all the step instances for the cluster startup sequence. 
fn create_steps( - volumes_dir: &PathBuf, - run_dir: &PathBuf, + volumes_dir: &Path, + run_dir: &Path, config: &Config, notest: bool, ) -> Vec> { - let lotus_step = LotusStep::new(volumes_dir.clone(), run_dir.clone()); - let lotus_miner_step = LotusMinerStep::new(volumes_dir.clone(), run_dir.clone()); - let eth_acc_funding_step = ETHAccFundingStep::new(run_dir.clone(), config.active_pdp_sp_count); - let usdfc_deploy_step = USDFCDeployStep::new(volumes_dir.clone(), run_dir.clone()); - let usdfc_funding_step = USDFCFundingStep::new(run_dir.clone(), config.active_pdp_sp_count); - let multicall3_deploy_step = MultiCall3DeployStep::new(volumes_dir.clone(), run_dir.clone()); - let foc_deploy_step = FOCDeployStep::new(volumes_dir.clone(), run_dir.clone()); + let lotus_step = LotusStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let lotus_miner_step = LotusMinerStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let eth_acc_funding_step = + ETHAccFundingStep::new(run_dir.to_path_buf(), config.active_pdp_sp_count); + let usdfc_deploy_step = USDFCDeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let usdfc_funding_step = + USDFCFundingStep::new(run_dir.to_path_buf(), config.active_pdp_sp_count); + let multicall3_deploy_step = + MultiCall3DeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let foc_deploy_step = FOCDeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); let pdp_sp_reg_step = PdpSpRegistrationStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, config.approved_pdp_sp_count, ); let yugabyte_step = YugabyteStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, ); let curio_step = CurioStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, ); - let synapse_test_step = SynapseTestE2EStep::new(volumes_dir.clone(), run_dir.clone(), notest); + let synapse_test_step = + SynapseTestE2EStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf(), notest); // Execute all steps // Note: PDP SP registration MUST happen after Curio because it needs @@ -245,35 +269,39 @@ fn create_steps( /// - Epoch 5: MockUSDFC Funding + Curio daemons (can be parallelized, needs FOC Deploy) /// - Epoch 6: PDP SP Registration (needs Curio daemons started) fn create_step_epochs( - volumes_dir: &PathBuf, - run_dir: &PathBuf, + volumes_dir: &Path, + run_dir: &Path, config: &Config, notest: bool, ) -> Vec>> { - let lotus_step = LotusStep::new(volumes_dir.clone(), run_dir.clone()); + let lotus_step = LotusStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); let yugabyte_step = YugabyteStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, ); - let lotus_miner_step = LotusMinerStep::new(volumes_dir.clone(), run_dir.clone()); - let eth_acc_funding_step = ETHAccFundingStep::new(run_dir.clone(), config.active_pdp_sp_count); - let usdfc_deploy_step = USDFCDeployStep::new(volumes_dir.clone(), run_dir.clone()); - let multicall3_deploy_step = MultiCall3DeployStep::new(volumes_dir.clone(), run_dir.clone()); - let foc_deploy_step = FOCDeployStep::new(volumes_dir.clone(), run_dir.clone()); - let usdfc_funding_step = USDFCFundingStep::new(run_dir.clone(), config.active_pdp_sp_count); + let lotus_miner_step = 
LotusMinerStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let eth_acc_funding_step = + ETHAccFundingStep::new(run_dir.to_path_buf(), config.active_pdp_sp_count); + let usdfc_deploy_step = USDFCDeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let multicall3_deploy_step = + MultiCall3DeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let foc_deploy_step = FOCDeployStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf()); + let usdfc_funding_step = + USDFCFundingStep::new(run_dir.to_path_buf(), config.active_pdp_sp_count); let curio_step = CurioStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, ); let pdp_sp_reg_step = PdpSpRegistrationStep::new( - volumes_dir.clone(), - run_dir.clone(), + volumes_dir.to_path_buf(), + run_dir.to_path_buf(), config.active_pdp_sp_count, config.approved_pdp_sp_count, ); - let synapse_test_step = SynapseTestE2EStep::new(volumes_dir.clone(), run_dir.clone(), notest); + let synapse_test_step = + SynapseTestE2EStep::new(volumes_dir.to_path_buf(), run_dir.to_path_buf(), notest); vec![ // Epoch 1: Start Lotus @@ -304,8 +332,8 @@ fn create_step_epochs( /// Execute the cluster startup steps. fn execute_cluster_steps( - volumes_dir: &PathBuf, - run_dir: &PathBuf, + volumes_dir: &Path, + run_dir: &Path, run_id: &str, config: &Config, parallel: bool, @@ -344,13 +372,15 @@ fn execute_cluster_steps( execute_steps_parallel( epoch_refs, - run_id.to_string(), - run_dir.clone(), - config.port_range_start, - config.port_range_count, - Some(portainer_port), - config.active_pdp_sp_count, - config.approved_pdp_sp_count, + step::StepExecutionConfig { + run_id: run_id.to_string(), + run_dir: run_dir.to_path_buf(), + port_start: config.port_range_start, + port_count: config.port_range_count, + portainer_port: Some(portainer_port), + active_pdp_sp_count: config.active_pdp_sp_count, + approved_pdp_sp_count: config.approved_pdp_sp_count, + }, )?; } else { info!("Execution mode: SEQUENTIAL"); @@ -358,13 +388,15 @@ fn execute_cluster_steps( execute_steps( steps.iter().map(|s| s.as_ref()).collect::>(), - run_id.to_string(), - run_dir.clone(), - config.port_range_start, - config.port_range_count, - Some(portainer_port), - config.active_pdp_sp_count, - config.approved_pdp_sp_count, + step::StepExecutionConfig { + run_id: run_id.to_string(), + run_dir: run_dir.to_path_buf(), + port_start: config.port_range_start, + port_count: config.port_range_count, + portainer_port: Some(portainer_port), + active_pdp_sp_count: config.active_pdp_sp_count, + approved_pdp_sp_count: config.approved_pdp_sp_count, + }, )?; } @@ -384,14 +416,17 @@ pub fn start_cluster( let (volumes_dir, run_dir, run_id) = setup_directories_and_run_id(volumes_dir, run_dir, run_id)?; - // Always perform regenesis (full reset) before starting - perform_regenesis()?; + // Stop any running containers (but preserve old run data) + stop_running_containers()?; info!("Starting local cluster..."); info!("Run ID: {}", run_id); info!("Volumes directory: {}", volumes_dir.display()); info!("Run directory: {}", run_dir.display()); + // Log system information + crate::utils::system_info::log_system_info(); + let config = load_and_validate_config()?; // Allocate port for Portainer (first port in dynamic range) @@ -408,7 +443,7 @@ pub fn start_cluster( create_all_networks(&run_id, config.active_pdp_sp_count)?; // Execute steps - execute_cluster_steps( + let exec_result = execute_cluster_steps( &volumes_dir, 
&run_dir, &run_id, @@ -416,8 +451,45 @@ pub fn start_cluster( parallel, portainer_port, notest, - )?; + ); + + // Always run post-start teardown: persist logs, cleanup dead containers, write status + if let Err(e) = finalize_start_teardown(&run_id) { + warn!("Post-start teardown encountered an error: {}", e); + } + + // Propagate original execution result + exec_result?; info!("Cluster started successfully!"); Ok(()) } + +/// Finalize the start attempt by collecting logs, cleaning dead containers, and writing status. +fn finalize_start_teardown(run_id: &str) -> Result<(), Box> { + use crate::docker::{ + persist_foc_container_logs, remove_dead_foc_containers, write_post_start_status_log, + }; + + info!("═══════════════════════════════════════════════════════════"); + info!("Running post-start teardown for run ID: {}", run_id); + info!("═══════════════════════════════════════════════════════════"); + + // Persist logs for all foc* image containers + info!("[1/3] Persisting logs for all foc* image containers..."); + persist_foc_container_logs(run_id)?; + + // Remove dead containers to keep environment tidy + info!("[2/3] Removing dead foc* containers..."); + remove_dead_foc_containers()?; + + // Write status snapshot to the run directory + info!("[3/3] Writing post-start status snapshot..."); + write_post_start_status_log(run_id)?; + + info!("═══════════════════════════════════════════════════════════"); + info!("Post-start teardown completed successfully"); + info!("═══════════════════════════════════════════════════════════"); + + Ok(()) +} diff --git a/src/commands/start/multicall3_deploy/deployment.rs b/src/commands/start/multicall3_deploy/deployment.rs index 9b22313..835fb56 100644 --- a/src/commands/start/multicall3_deploy/deployment.rs +++ b/src/commands/start/multicall3_deploy/deployment.rs @@ -6,7 +6,7 @@ use super::key_management; use super::prerequisites::check_required_addresses; use crate::commands::start::lotus_utils::get_lotus_rpc_url; use crate::docker::command_logger::run_and_log_command_strings; -use crate::paths::foc_localnet_multicall3_repo; +use crate::paths::foc_devnet_multicall3_repo; use std::error::Error; use tracing::{error, info}; @@ -20,7 +20,7 @@ pub fn deploy_multicall3( info!("Deploying Multicall3 contract..."); // Get the multicall3 repository path - let multicall3_repo = foc_localnet_multicall3_repo(); + let multicall3_repo = foc_devnet_multicall3_repo(); if !multicall3_repo.exists() { return Err(format!( @@ -55,7 +55,6 @@ pub fn deploy_multicall3( let volume_mount = format!("{}:/workspace", multicall3_repo.display()); let args: Vec = vec![ "run".to_string(), - "--rm".to_string(), "-u".to_string(), "foc-user".to_string(), "--name".to_string(), @@ -64,7 +63,7 @@ pub fn deploy_multicall3( "host".to_string(), // Use host network to access Lotus RPC on dynamic port "-v".to_string(), volume_mount, - "foc-builder".to_string(), + crate::constants::BUILDER_DOCKER_IMAGE.to_string(), "bash".to_string(), "-c".to_string(), deploy_cmd, diff --git a/src/commands/start/multicall3_deploy/verification.rs b/src/commands/start/multicall3_deploy/verification.rs index 82ae961..8dcf8c4 100644 --- a/src/commands/start/multicall3_deploy/verification.rs +++ b/src/commands/start/multicall3_deploy/verification.rs @@ -22,14 +22,17 @@ pub fn verify_multicall3( // Verify that the contract exists at the address using cast let verify_cmd = format!("cast code {} --rpc-url {}", contract_address, lotus_rpc_url); + let run_id = context.run_id(); + let container_name = 
format!("foc-{}-multicall3-verify", run_id); let args: Vec = vec![ "run".to_string(), - "--rm".to_string(), + "--name".to_string(), + container_name, "-u".to_string(), "foc-user".to_string(), "--network".to_string(), "host".to_string(), - "foc-builder".to_string(), + crate::constants::BUILDER_DOCKER_IMAGE.to_string(), "bash".to_string(), "-c".to_string(), verify_cmd, diff --git a/src/commands/start/pdp_service_provider/constants.rs b/src/commands/start/pdp_service_provider/constants.rs index 5bd82b8..9a81eab 100644 --- a/src/commands/start/pdp_service_provider/constants.rs +++ b/src/commands/start/pdp_service_provider/constants.rs @@ -19,7 +19,7 @@ pub const STORAGE_PRICE_PER_TIB_PER_DAY: u64 = 1000000000000000000; pub const MIN_PROVING_PERIOD_EPOCHS: u64 = 2880; /// Geographic location identifier -pub const LOCATION: &str = "LocalNet"; +pub const LOCATION: &str = "DevNet"; /// Provider description -pub const PROVIDER_DESCRIPTION: &str = "PDP Service Provider 0 for LocalNet"; +pub const PROVIDER_DESCRIPTION: &str = "PDP Service Provider 0 for DevNet"; diff --git a/src/commands/start/pdp_service_provider/pdp_service_provider_step.rs b/src/commands/start/pdp_service_provider/pdp_service_provider_step.rs index 2528033..beb8694 100644 --- a/src/commands/start/pdp_service_provider/pdp_service_provider_step.rs +++ b/src/commands/start/pdp_service_provider/pdp_service_provider_step.rs @@ -160,7 +160,7 @@ impl Step for PdpSpRegistrationStep { let deployer_foc_address = context .get("deployer_foc_address") .ok_or("DEPLOYER_FOC address not found in context")?; - let deployer_foc_eth_address = context + let _deployer_foc_eth_address = context .get("deployer_foc_eth_address") .ok_or("DEPLOYER_FOC Ethereum address not found in context")?; @@ -204,27 +204,29 @@ impl Step for PdpSpRegistrationStep { let service_url = format!("http://localhost:{}", pdp_port); match registration::register_single_provider( - run_id, - ®istry_address, - &sp_address, - &sp_eth_address, - &mock_usdfc_address, - &lotus_rpc_url, - &service_url, - sp_index, + ®istration::ProviderRegistrationParams { + run_id, + registry_address: ®istry_address, + pdp_sp_address: &sp_address, + pdp_sp_eth_address: &sp_eth_address, + mock_usdfc_address: &mock_usdfc_address, + lotus_rpc_url: &lotus_rpc_url, + service_url: &service_url, + sp_index, + }, context, ) { Ok(provider_id) => { // Only approve if within approved count if should_approve { if let Err(e) = registration::add_to_approved_list( - run_id, - &warm_storage_address, - provider_id, - deployer_foc_address.as_str(), - deployer_foc_eth_address.as_str(), - &lotus_rpc_url, - sp_index, + ®istration::ApprovedListParams { + run_id, + warm_storage_address: &warm_storage_address, + provider_id, + deployer_foc_address: deployer_foc_address.as_str(), + lotus_rpc_url: &lotus_rpc_url, + }, context, ) { errors.push(format!("SP {} approval failed: {}", sp_index, e)); @@ -252,11 +254,9 @@ impl Step for PdpSpRegistrationStep { // Check for errors if !errors.is_empty() { - return Err(format!( - "Failed to register some providers:\n{}", - errors.join("\n") - ) - .into()); + return Err( + format!("Failed to register some providers:\n{}", errors.join("\n")).into(), + ); } // Store all provider IDs diff --git a/src/commands/start/pdp_service_provider/registration.rs b/src/commands/start/pdp_service_provider/registration.rs index 362f542..24e52b9 100644 --- a/src/commands/start/pdp_service_provider/registration.rs +++ b/src/commands/start/pdp_service_provider/registration.rs @@ -4,39 +4,54 @@ use 
tracing::info; use super::constants::*; use crate::commands::start::step::SetupContext; -use crate::constants::BUILDER_CONTAINER; +use crate::constants::BUILDER_DOCKER_IMAGE; use crate::docker::command_logger::run_and_log_command_strings; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; use std::error::Error; +/// Parameters for provider registration +pub struct ProviderRegistrationParams<'a> { + pub run_id: &'a str, + pub registry_address: &'a str, + pub pdp_sp_address: &'a str, + pub pdp_sp_eth_address: &'a str, + pub mock_usdfc_address: &'a str, + pub lotus_rpc_url: &'a str, + pub service_url: &'a str, + pub sp_index: usize, +} + +/// Parameters for adding provider to approved list +pub struct ApprovedListParams<'a> { + pub run_id: &'a str, + pub warm_storage_address: &'a str, + pub provider_id: u64, + pub deployer_foc_address: &'a str, + pub lotus_rpc_url: &'a str, +} + /// Register a single provider in ServiceProviderRegistry contract /// /// Returns the provider ID assigned by the registry. pub fn register_single_provider( - run_id: &str, - registry_address: &str, - pdp_sp_address: &str, - pdp_sp_eth_address: &str, - mock_usdfc_address: &str, - lotus_rpc_url: &str, - service_url: &str, - sp_index: usize, + params: &ProviderRegistrationParams, context: &SetupContext, ) -> Result> { - let _ = run_id; // Not needed when using foc-builder - - let label = format!("PDP_SP_{}", sp_index); + let label = format!("PDP_SP_{}", params.sp_index); + let container_name = format!("foc-{}-pdp-register-sp{}", params.run_id, params.sp_index); info!("Registering {} in ServiceProviderRegistry...", label); // Get private key for this PDP SP let pdp_sp_private_key = - crate::commands::start::foc_deployer::get_private_key(pdp_sp_address, "")?; + crate::commands::start::foc_deployer::get_private_key(params.pdp_sp_address, "")?; // Build capability keys array let cap_keys = build_capability_keys(); // Build capability values array with the specific service URL - let cap_values = build_capability_values_with_url(mock_usdfc_address, service_url)?; + let cap_values = + build_capability_values_with_url(params.mock_usdfc_address, params.service_url)?; // Calculate registration fee in wei let registration_fee_wei = format!("{}000000000000000000", REGISTRATION_FEE_FIL); @@ -56,31 +71,32 @@ pub fn register_single_provider( --rpc-url {} \ --private-key {} \ --gas-limit 10000000000"#, - registry_address, - pdp_sp_eth_address, + params.registry_address, + params.pdp_sp_eth_address, label, PROVIDER_DESCRIPTION, cap_keys, cap_values, registration_fee_wei, - lotus_rpc_url, + params.lotus_rpc_url, pdp_sp_private_key, ); let args: Vec = vec![ "run".to_string(), - "--rm".to_string(), + "--name".to_string(), + container_name, "-u".to_string(), "foc-user".to_string(), "--network".to_string(), "host".to_string(), - BUILDER_CONTAINER.to_string(), + BUILDER_DOCKER_IMAGE.to_string(), "bash".to_string(), "-c".to_string(), cast_cmd, ]; - let key = format!("pdp_register_provider_sp{}", sp_index); + let key = format!("pdp_register_provider_sp{}", params.sp_index); let output = run_and_log_command_strings("docker", &args, context, &key)?; if !output.status.success() { @@ -101,8 +117,13 @@ pub fn register_single_provider( wait_for_confirmation(); // Query provider ID - let provider_id = - query_provider_id(registry_address, pdp_sp_eth_address, lotus_rpc_url, context)?; + let provider_id = query_provider_id( + params.run_id, + params.registry_address, + params.pdp_sp_eth_address, + 
params.lotus_rpc_url, + context, + )?; info!("✓ {} Provider ID: {}", label, provider_id); Ok(provider_id) @@ -110,50 +131,45 @@ pub fn register_single_provider( /// Add provider to approved list in WarmStorage contract pub fn add_to_approved_list( - run_id: &str, - warm_storage_address: &str, - provider_id: u64, - deployer_foc_address: &str, - _deployer_foc_eth_address: &str, - lotus_rpc_url: &str, - _sp_index: usize, + params: &ApprovedListParams, context: &SetupContext, ) -> Result<(), Box> { - let _ = run_id; // Not needed when using foc-builder - info!( "Adding provider {} to WarmStorage approved list...", - provider_id + params.provider_id ); // Get private key for DEPLOYER_FOC let deployer_foc_private_key = - crate::commands::start::foc_deployer::get_private_key(deployer_foc_address, "")?; + crate::commands::start::foc_deployer::get_private_key(params.deployer_foc_address, "")?; // Use high gas limit for FEVM (cast send doesn't support gas-estimate-multiplier) - let provider_id_str = provider_id.to_string(); + let provider_id_str = params.provider_id.to_string(); + let container_name = format!("foc-{}-pdp-approve-{}", params.run_id, params.provider_id); + let args: Vec = vec![ "run".to_string(), - "--rm".to_string(), + "--name".to_string(), + container_name, "-u".to_string(), "foc-user".to_string(), "--network".to_string(), "host".to_string(), - BUILDER_CONTAINER.to_string(), + BUILDER_DOCKER_IMAGE.to_string(), "cast".to_string(), "send".to_string(), - warm_storage_address.to_string(), + params.warm_storage_address.to_string(), "addApprovedProvider(uint256)".to_string(), provider_id_str, "--rpc-url".to_string(), - lotus_rpc_url.to_string(), + params.lotus_rpc_url.to_string(), "--private-key".to_string(), deployer_foc_private_key, "--gas-limit".to_string(), "10000000000".to_string(), ]; - let key = format!("pdp_add_approved_provider_{}", provider_id); + let key = format!("pdp_add_approved_provider_{}", params.provider_id); let output = run_and_log_command_strings("docker", &args, context, &key)?; if !output.status.success() { @@ -237,19 +253,23 @@ fn encode_uint_minimal(value: u64) -> String { /// Query provider ID from registry by eth address fn query_provider_id( + run_id: &str, registry_address: &str, pdp_sp_eth_address: &str, lotus_rpc_url: &str, context: &SetupContext, ) -> Result> { + let container_name = format!("foc-{}-pdp-query-provider-{}", run_id, pdp_sp_eth_address); + let args: Vec = vec![ "run".to_string(), - "--rm".to_string(), + "--name".to_string(), + container_name, "-u".to_string(), "foc-user".to_string(), "--network".to_string(), "host".to_string(), - BUILDER_CONTAINER.to_string(), + BUILDER_DOCKER_IMAGE.to_string(), "cast".to_string(), "call".to_string(), registry_address.to_string(), @@ -297,36 +317,44 @@ pub fn verify_provider_count( lotus_rpc_url: &str, context: &SetupContext, ) -> Result> { - let _ = run_id; // Not needed when using foc-builder - - let args: Vec = vec![ - "run".to_string(), - "--rm".to_string(), - "-u".to_string(), - "foc-user".to_string(), - "--network".to_string(), - "host".to_string(), - BUILDER_CONTAINER.to_string(), - "cast".to_string(), - "call".to_string(), - registry_address.to_string(), - "getProviderCount()(uint256)".to_string(), - "--rpc-url".to_string(), - lotus_rpc_url.to_string(), - ]; - - let key = "pdp_verify_provider_count".to_string(); - let output = run_and_log_command_strings("docker", &args, context, &key)?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return 
Err(format!("Failed to query provider count: {}", stderr).into()); - } - - let result = String::from_utf8_lossy(&output.stdout); - let count: u64 = result.trim().parse().unwrap_or(0); - - Ok(count) + retry_with_fixed_delay( + || { + let container_name = format!("foc-{}-pdp-verify-count", run_id); + + let args: Vec = vec![ + "run".to_string(), + "--name".to_string(), + container_name, + "-u".to_string(), + "foc-user".to_string(), + "--network".to_string(), + "host".to_string(), + BUILDER_DOCKER_IMAGE.to_string(), + "cast".to_string(), + "call".to_string(), + registry_address.to_string(), + "getProviderCount()(uint256)".to_string(), + "--rpc-url".to_string(), + lotus_rpc_url.to_string(), + ]; + + let key = "pdp_verify_provider_count".to_string(); + let output = run_and_log_command_strings("docker", &args, context, &key)?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("Failed to query provider count: {}", stderr).into()); + } + + let result = String::from_utf8_lossy(&output.stdout); + let count: u64 = result.trim().parse().unwrap_or(0); + + Ok(count) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + "Provider count verification", + ) } /// Verify provider ID by address on-chain @@ -339,37 +367,45 @@ pub fn verify_provider_id_by_address( lotus_rpc_url: &str, context: &SetupContext, ) -> Result> { - let _ = run_id; // Not needed when using foc-builder - - let args: Vec = vec![ - "run".to_string(), - "--rm".to_string(), - "-u".to_string(), - "foc-user".to_string(), - "--network".to_string(), - "host".to_string(), - BUILDER_CONTAINER.to_string(), - "cast".to_string(), - "call".to_string(), - registry_address.to_string(), - "getProviderIdByAddress(address)(uint256)".to_string(), - provider_address.to_string(), - "--rpc-url".to_string(), - lotus_rpc_url.to_string(), - ]; - - let key = format!("pdp_verify_provider_id_{}", provider_address); - let output = run_and_log_command_strings("docker", &args, context, &key)?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(format!("Failed to query provider ID by address: {}", stderr).into()); - } - - let result = String::from_utf8_lossy(&output.stdout); - let provider_id: u64 = result.trim().parse().unwrap_or(0); - - Ok(provider_id) + retry_with_fixed_delay( + || { + let container_name = format!("foc-{}-pdp-verify-id-{}", run_id, provider_address); + + let args: Vec = vec![ + "run".to_string(), + "--name".to_string(), + container_name, + "-u".to_string(), + "foc-user".to_string(), + "--network".to_string(), + "host".to_string(), + BUILDER_DOCKER_IMAGE.to_string(), + "cast".to_string(), + "call".to_string(), + registry_address.to_string(), + "getProviderIdByAddress(address)(uint256)".to_string(), + provider_address.to_string(), + "--rpc-url".to_string(), + lotus_rpc_url.to_string(), + ]; + + let key = format!("pdp_verify_provider_id_{}", provider_address); + let output = run_and_log_command_strings("docker", &args, context, &key)?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("Failed to query provider ID by address: {}", stderr).into()); + } + + let result = String::from_utf8_lossy(&output.stdout); + let provider_id: u64 = result.trim().parse().unwrap_or(0); + + Ok(provider_id) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + &format!("Provider ID verification for {}", provider_address), + ) } /// Verify provider is in approved list using StateView contract @@ 
-383,37 +419,47 @@ pub fn verify_approved_provider( lotus_rpc_url: &str, context: &SetupContext, ) -> Result> { - let _ = run_id; // Not needed when using foc-builder - - // Use isProviderApproved function on StateView contract - let provider_id_str = provider_id.to_string(); - let args: Vec = vec![ - "run".to_string(), - "--rm".to_string(), - "-u".to_string(), - "foc-user".to_string(), - "--network".to_string(), - "host".to_string(), - BUILDER_CONTAINER.to_string(), - "cast".to_string(), - "call".to_string(), - state_view_address.to_string(), - "isProviderApproved(uint256)(bool)".to_string(), - provider_id_str, - "--rpc-url".to_string(), - lotus_rpc_url.to_string(), - ]; - - let key = format!("pdp_verify_approved_provider_{}", provider_id); - let output = run_and_log_command_strings("docker", &args, context, &key)?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(format!("Failed to query if provider is approved: {}", stderr).into()); - } - - let result = String::from_utf8_lossy(&output.stdout); - let is_approved = result.trim() == "true"; - - Ok(is_approved) + retry_with_fixed_delay( + || { + // Use isProviderApproved function on StateView contract + let provider_id_str = provider_id.to_string(); + let container_name = format!("foc-{}-pdp-verify-approved-{}", run_id, provider_id); + let args: Vec = vec![ + "run".to_string(), + "--name".to_string(), + container_name, + "-u".to_string(), + "foc-user".to_string(), + "--network".to_string(), + "host".to_string(), + BUILDER_DOCKER_IMAGE.to_string(), + "cast".to_string(), + "call".to_string(), + state_view_address.to_string(), + "isProviderApproved(uint256)(bool)".to_string(), + provider_id_str, + "--rpc-url".to_string(), + lotus_rpc_url.to_string(), + ]; + + let key = format!("pdp_verify_approved_provider_{}", provider_id); + let output = run_and_log_command_strings("docker", &args, context, &key)?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(format!("Failed to query if provider is approved: {}", stderr).into()); + } + + let result = String::from_utf8_lossy(&output.stdout); + let is_approved = result.trim() == "true"; + + Ok(is_approved) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + &format!( + "Approved provider verification for provider ID {}", + provider_id + ), + ) } diff --git a/src/commands/start/step/mod.rs b/src/commands/start/step/mod.rs index 29992e8..cbf97c5 100644 --- a/src/commands/start/step/mod.rs +++ b/src/commands/start/step/mod.rs @@ -55,7 +55,7 @@ pub struct SetupContext { /// Run ID for this execution (e.g., "251203-1246-thirsty-wolf") run_id: String, - /// Run-specific directory (e.g., ~/.foc-localnet/run/251203-1246-thirsty-wolf) + /// Run-specific directory (e.g., ~/.foc-devnet/run/251203-1246-thirsty-wolf) run_dir: PathBuf, /// Port allocator for dynamic port assignment (thread-safe) @@ -153,8 +153,8 @@ impl SetupContext { /// /// # Example /// ``` - /// context.save_command("lotus_container_create", "docker run -d foc-lotus"); - /// // Stored as: "lotus_container_create" = "docker run -d foc-lotus" + /// context.save_command("lotus_container_create", "docker run -d [LOTUS_CONTAINER]"); + /// // Stored as: "lotus_container_create" = "docker run -d [LOTUS_CONTAINER]" /// // Also appended to: "command_history" list /// ``` pub fn save_command(&self, key: &str, command_str: &str) { @@ -308,55 +308,67 @@ pub trait Step: Send + Sync { /// * `port_start` - Starting port for the contiguous port range 
/// * `port_count` - Number of ports in the range /// * `portainer_port` - Optional port already allocated for Portainer +/// +/// Configuration for step execution +pub struct StepExecutionConfig { + pub run_id: String, + pub run_dir: PathBuf, + pub port_start: u16, + pub port_count: u16, + pub portainer_port: Option, + pub active_pdp_sp_count: usize, + pub approved_pdp_sp_count: usize, +} + pub fn execute_steps( steps: Vec<&dyn Step>, - run_id: String, - run_dir: PathBuf, - port_start: u16, - port_count: u16, - portainer_port: Option, - active_pdp_sp_count: usize, - approved_pdp_sp_count: usize, + config: StepExecutionConfig, ) -> Result<(), Box> { // Create port allocator and verify all ports are available - let mut port_allocator = PortAllocator::new(port_start, port_count)?; + let mut port_allocator = PortAllocator::new(config.port_start, config.port_count)?; info!( "Port range check: {}-{} ({} ports)", - port_start, - port_start + port_count - 1, - port_count + config.port_start, + config.port_start + config.port_count - 1, + config.port_count ); // If Portainer is using a port in our range, we don't want to fail the availability check // because Portainer is already running (started by us). // So we verify all ports EXCEPT the portainer port if it's in range. - for port in port_start..(port_start + port_count) { - if let Some(p_port) = portainer_port { + for port in config.port_start..(config.port_start + config.port_count) { + if let Some(p_port) = config.portainer_port { if port == p_port { continue; } } if !crate::docker::core::is_port_available(port) { - // This is a bit of a hack since we're calling a private function from PortAllocator - // but we'll just do the check here. + // Check port availability directly to ensure no conflicts before starting containers return Err(format!("Port {} is already in use", port).into()); } } info!("All ports in range are available"); // If Portainer port was provided, mark it as allocated in our allocator - if let Some(p_port) = portainer_port { - if p_port >= port_start && p_port < (port_start + port_count) { + if let Some(p_port) = config.portainer_port { + if p_port >= config.port_start && p_port < (config.port_start + config.port_count) { port_allocator.mark_allocated(p_port)?; } } - let context = SetupContext::with_run_id_and_ports(run_id, run_dir, port_allocator); + let context = + SetupContext::with_run_id_and_ports(config.run_id, config.run_dir, port_allocator); // Set initial state - context.set("active_pdp_sp_count", active_pdp_sp_count.to_string()); - context.set("approved_pdp_sp_count", approved_pdp_sp_count.to_string()); + context.set( + "active_pdp_sp_count", + config.active_pdp_sp_count.to_string(), + ); + context.set( + "approved_pdp_sp_count", + config.approved_pdp_sp_count.to_string(), + ); let overall_start = Instant::now(); let mut all_step_timings = Vec::new(); @@ -401,28 +413,22 @@ pub fn execute_steps( /// Returns Ok(()) if all steps in all epochs complete successfully, or an error if any step fails. 
pub fn execute_steps_parallel( step_epochs: Vec>, - run_id: String, - run_dir: PathBuf, - port_start: u16, - port_count: u16, - portainer_port: Option, - active_pdp_sp_count: usize, - approved_pdp_sp_count: usize, + config: StepExecutionConfig, ) -> Result<(), Box> { // Create port allocator and verify all ports are available - let mut port_allocator = PortAllocator::new(port_start, port_count)?; + let mut port_allocator = PortAllocator::new(config.port_start, config.port_count)?; info!( "Port range check: {}-{} ({} ports)", - port_start, - port_start + port_count - 1, - port_count + config.port_start, + config.port_start + config.port_count - 1, + config.port_count ); // If Portainer is using a port in our range, we don't want to fail the availability check // because Portainer is already running (started by us). - for port in port_start..(port_start + port_count) { - if let Some(p_port) = portainer_port { + for port in config.port_start..(config.port_start + config.port_count) { + if let Some(p_port) = config.portainer_port { if port == p_port { continue; } @@ -434,19 +440,25 @@ pub fn execute_steps_parallel( info!("✓ All ports in range are available"); // Mark portainer port as allocated if provided - if let Some(port) = portainer_port { + if let Some(port) = config.portainer_port { port_allocator.mark_allocated(port)?; } let context = Arc::new(SetupContext::with_run_id_and_ports( - run_id, - run_dir, + config.run_id, + config.run_dir, port_allocator, )); // Set initial state - context.set("active_pdp_sp_count", active_pdp_sp_count.to_string()); - context.set("approved_pdp_sp_count", approved_pdp_sp_count.to_string()); + context.set( + "active_pdp_sp_count", + config.active_pdp_sp_count.to_string(), + ); + context.set( + "approved_pdp_sp_count", + config.approved_pdp_sp_count.to_string(), + ); let overall_start = Instant::now(); let mut all_step_timings: Vec<(String, Duration)> = Vec::new(); diff --git a/src/commands/start/synapse_test_e2e/synapse_test_step.rs b/src/commands/start/synapse_test_e2e/synapse_test_step.rs index 93ef5d8..c8f971d 100644 --- a/src/commands/start/synapse_test_e2e/synapse_test_step.rs +++ b/src/commands/start/synapse_test_e2e/synapse_test_step.rs @@ -1,17 +1,38 @@ +use crate::commands::init::keys::{load_keys, KeyInfo}; use crate::commands::start::step::{SetupContext, Step}; -use crate::constants::BUILDER_IMAGE; +use crate::constants::BUILDER_DOCKER_IMAGE; use crate::docker::core::docker_command; use crate::paths::{ - contract_addresses_file, foc_localnet_docker_volumes_cache, foc_localnet_keys, - foc_localnet_synapse_sdk_repo, + contract_addresses_file, foc_devnet_docker_volumes_cache, foc_devnet_keys, + foc_devnet_synapse_sdk_repo, }; use rand::Rng; use std::error::Error; use std::fs::File; use std::io::Write; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use tracing::{info, warn}; +const POST_DEPLOY_WAIT_SECONDS: u64 = 5; + +/// Type alias for extracted contract addresses and keys +pub type ContractAddresses = (String, String, String, String, String); + +/// Parameters for Docker test execution +struct DockerTestParams<'a> { + run_id: &'a str, + synapse_sdk_path: &'a Path, + builder_volumes_dir: &'a Path, + random_file_path: &'a Path, + script: &'a str, + user_key: &'a str, + lotus_rpc_url: &'a str, + warm_storage_addr: &'a str, + multicall3_addr: &'a str, + usdfc_addr: &'a str, + sp_registry_addr: &'a str, +} + pub struct SynapseTestE2EStep { #[allow(dead_code)] volumes_dir: PathBuf, @@ -40,10 +61,10 @@ impl Step for SynapseTestE2EStep { return 
Ok(()); } - let synapse_sdk_path = foc_localnet_synapse_sdk_repo(); + let synapse_sdk_path = foc_devnet_synapse_sdk_repo(); if !synapse_sdk_path.exists() { return Err(format!( - "synapse-sdk repository not found at {}. Please run 'foc-localnet init' to clone it.", + "synapse-sdk repository not found at {}. Please run 'foc-devnet init' to clone it.", synapse_sdk_path.display() ) .into()); @@ -64,156 +85,285 @@ impl Step for SynapseTestE2EStep { info!("Running Synapse E2E Test..."); let run_id = context.run_id(); - let synapse_sdk_path = foc_localnet_synapse_sdk_repo(); - let builder_volumes_dir = foc_localnet_docker_volumes_cache().join("foc-builder"); - - // Load contract addresses from file - let addresses_path = contract_addresses_file(run_id); - let addresses_file = File::open(&addresses_path)?; - let addresses: serde_json::Value = serde_json::from_reader(addresses_file)?; - - // Load keys from file - let keys_path = foc_localnet_keys().join("addresses.json"); - let keys_file = File::open(&keys_path)?; - let keys: Vec = serde_json::from_reader(keys_file)?; - - // Get required addresses and keys - let user_key_value = keys - .iter() - .find(|k| k["name"] == "USER_1") - .ok_or("Key USER_1 not found in addresses.json")?; - let user_key = format!( - "0x{}", - user_key_value["private_key"] - .as_str() - .ok_or("Private key is not a string")? - ); + let synapse_sdk_path = foc_devnet_synapse_sdk_repo(); + let builder_volumes_dir = + foc_devnet_docker_volumes_cache().join(crate::constants::BUILDER_CONTAINER); - let warm_storage_addr = addresses["foc_contracts"]["filecoin_warm_storage_service_proxy"] - .as_str() - .ok_or("Warm storage address not found in contract_addresses.json")? - .to_string(); - let usdfc_addr = addresses["contracts"]["usdfc"] - .as_str() - .ok_or("USDFC address not found in contract_addresses.json")? - .to_string(); - let multicall3_addr = addresses["contracts"]["multicall"] - .as_str() - .ok_or("Multicall3 address not found in contract_addresses.json")? - .to_string(); - let sp_registry_addr = addresses["foc_contracts"]["service_provider_registry_proxy"] - .as_str() - .ok_or("SP Registry address not found in contract_addresses.json")? 
- .to_string(); + // Load contract addresses and keys + let addresses = load_contract_addresses(run_id)?; + let keys = load_wallet_keys()?; + + // Extract required addresses and keys + let (user_key, warm_storage_addr, usdfc_addr, multicall3_addr, sp_registry_addr) = + extract_required_addresses(&addresses, &keys)?; let lotus_rpc_url = crate::commands::start::lotus_utils::get_lotus_rpc_url(context)?; - // Create random file for testing - let random_file_path = self.run_dir.join("random_test_file.txt"); - { - let mut file = File::create(&random_file_path)?; - let mut rng = rand::thread_rng(); - let data: Vec = (0..912).map(|_| rng.gen()).collect(); - file.write_all(&data)?; - } - info!("Created random test file at {}", random_file_path.display()); - - // Prepare environment variables - let env_vars = vec![ - ("CLIENT_PRIVATE_KEY", user_key.clone()), - ("NETWORK", "localnet".to_string()), - ("LOCALNET_WARM_STORAGE_ADDRESS", warm_storage_addr.clone()), - ("LOCALNET_USDFC_ADDRESS", usdfc_addr.clone()), - ("LOCALNET_MULTICALL3_ADDRESS", multicall3_addr.clone()), - ("LOCALNET_SP_REGISTRY_ADDRESS", sp_registry_addr.clone()), - ("LOCALNET_RPC_URL", lotus_rpc_url.clone()), - ("PRIVATE_KEY", user_key.clone()), - ("CI", "true".to_string()), - ]; - - // Prepare the script to run inside the container - let script = r#" -set -e -cd /synapse-sdk -echo "Installing dependencies..." -pnpm install - -echo "Building SDK..." -pnpm build - -echo "Running post-deploy setup..." -node utils/post-deploy-setup.js - -echo "Waiting for 5 seconds..." -sleep 5 - -echo "Running storage E2E test..." -node utils/example-storage-e2e.js --network localnet /tmp/random_test_file.txt -"#; - - let mut docker_args = vec![ - "run".to_string(), - "--rm".to_string(), - "--name".to_string(), - format!("foc-{}-synapse-test", run_id), - "--network".to_string(), - "host".to_string(), - "-u".to_string(), - "root".to_string(), // Run as root to ensure permissions - ]; - - // Add environment variables - for (key, value) in env_vars { - docker_args.push("-e".to_string()); - docker_args.push(format!("{}={}", key, value)); - } + // Create random test file + let random_file_path = create_random_test_file(&self.run_dir)?; - // Mount synapse-sdk - // Resolve symlink to ensure Docker mounts the actual directory - let synapse_sdk_real_path = synapse_sdk_path - .canonicalize() - .unwrap_or(synapse_sdk_path.clone()); - docker_args.push("-v".to_string()); - docker_args.push(format!("{}:/synapse-sdk", synapse_sdk_real_path.display())); - - // Mount random file - docker_args.push("-v".to_string()); - docker_args.push(format!( - "{}:/tmp/random_test_file.txt", - random_file_path.display() - )); - - // Mount cargo cache - docker_args.push("-v".to_string()); - docker_args.push(format!( - "{}:/root/.cargo", - builder_volumes_dir.join("cargo").display() - )); - - docker_args.push(BUILDER_IMAGE.to_string()); - docker_args.push("/bin/bash".to_string()); - docker_args.push("-c".to_string()); - docker_args.push(script.to_string()); - - let args_ref: Vec<&str> = docker_args.iter().map(|s| s.as_str()).collect(); - - info!("Executing test script in container..."); - let output = docker_command(&args_ref)?; - - if !output.status.success() { - let stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); - warn!("Synapse E2E Test failed!"); - warn!("Stdout:\n{}", stdout); - warn!("Stderr:\n{}", stderr); - return Err("Synapse E2E Test failed".into()); - } + // Generate the test script + let script = 
generate_test_script( + &lotus_rpc_url, + &warm_storage_addr, + &multicall3_addr, + &usdfc_addr, + &sp_registry_addr, + ); - info!("✓ Synapse E2E Test completed successfully"); - Ok(()) + // Build and execute docker command + execute_docker_test(&DockerTestParams { + run_id, + synapse_sdk_path: &synapse_sdk_path, + builder_volumes_dir: &builder_volumes_dir, + random_file_path: &random_file_path, + script: &script, + user_key: &user_key, + lotus_rpc_url: &lotus_rpc_url, + warm_storage_addr: &warm_storage_addr, + multicall3_addr: &multicall3_addr, + usdfc_addr: &usdfc_addr, + sp_registry_addr: &sp_registry_addr, + }) } fn post_execute(&self, _context: &SetupContext) -> Result<(), Box> { Ok(()) } } + +/// Build and execute docker test container. +fn execute_docker_test(params: &DockerTestParams) -> Result<(), Box> { + let docker_args = build_docker_command(params)?; + + let args_ref: Vec<&str> = docker_args.iter().map(|s| s.as_str()).collect(); + + info!("Executing test script in container..."); + let output = docker_command(&args_ref)?; + + if !output.status.success() { + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + warn!("Synapse E2E Test failed!"); + warn!("Stdout:\n{}", stdout); + warn!("Stderr:\n{}", stderr); + return Err("Synapse E2E Test failed".into()); + } + + info!("✓ Synapse E2E Test completed successfully"); + Ok(()) +} + +/// Build docker command arguments for test execution. +fn build_docker_command(params: &DockerTestParams) -> Result, Box> { + let mut docker_args = vec![ + "run".to_string(), + "--name".to_string(), + format!("foc-{}-synapse-test", params.run_id), + "--network".to_string(), + "host".to_string(), + "-u".to_string(), + "root".to_string(), + ]; + + // Add environment variables required by synapse-sdk scripts + // Note: example-storage-e2e.js uses env vars, not CLI flags + let env_vars = vec![ + ("CLIENT_PRIVATE_KEY", params.user_key.to_string()), + ("PRIVATE_KEY", params.user_key.to_string()), + ("RPC_URL", params.lotus_rpc_url.to_string()), + ("WARM_STORAGE_ADDRESS", params.warm_storage_addr.to_string()), + ("MULTICALL3_ADDRESS", params.multicall3_addr.to_string()), + ("USDFC_ADDRESS", params.usdfc_addr.to_string()), + ("SP_REGISTRY_ADDRESS", params.sp_registry_addr.to_string()), + ("CI", "true".to_string()), + ]; + + for (key, value) in env_vars { + docker_args.push("-e".to_string()); + docker_args.push(format!("{}={}", key, value)); + } + + // Mount synapse-sdk + let synapse_sdk_real_path = params + .synapse_sdk_path + .canonicalize() + .unwrap_or_else(|_| params.synapse_sdk_path.to_path_buf()); + docker_args.push("-v".to_string()); + docker_args.push(format!("{}:/synapse-sdk", synapse_sdk_real_path.display())); + + // Mount random test file + docker_args.push("-v".to_string()); + docker_args.push(format!( + "{}:/tmp/random_test_file.txt", + params.random_file_path.display() + )); + + // Mount cargo cache + docker_args.push("-v".to_string()); + docker_args.push(format!( + "{}:/root/.cargo", + params.builder_volumes_dir.join("cargo").display() + )); + + // Add image and command + docker_args.push(BUILDER_DOCKER_IMAGE.to_string()); + docker_args.push("/bin/bash".to_string()); + docker_args.push("-c".to_string()); + docker_args.push(params.script.to_string()); + + Ok(docker_args) +} + +/// Load contract addresses from file. 
+fn load_contract_addresses(run_id: &str) -> Result> { + let addresses_path = contract_addresses_file(run_id); + let addresses_file = File::open(&addresses_path)?; + let addresses: serde_json::Value = serde_json::from_reader(addresses_file)?; + Ok(addresses) +} + +/// Load wallet keys from the generated addresses file. +fn load_wallet_keys() -> Result, Box> { + let keys_file = foc_devnet_keys().join("addresses.json"); + if !keys_file.exists() { + return Err(format!("Keys file not found at {}", keys_file.display()).into()); + } + + load_keys() +} + +/// Extract required addresses and keys from loaded data. +fn extract_required_addresses( + addresses: &serde_json::Value, + keys: &[KeyInfo], +) -> Result> { + let user_key = keys + .iter() + .find(|k| k.name == "USER_1") + .ok_or("USER_1 key not found in addresses.json")? + .private_key + .clone(); + let user_key_prefixed = format!("0x{}", user_key); + + // Extract contract addresses + let warm_storage_addr = addresses["foc_contracts"]["filecoin_warm_storage_service_proxy"] + .as_str() + .ok_or("Warm storage address not found in contract_addresses.json")? + .to_string(); + let usdfc_addr = addresses["contracts"]["usdfc"] + .as_str() + .ok_or("USDFC address not found in contract_addresses.json")? + .to_string(); + let multicall3_addr = addresses["contracts"]["multicall"] + .as_str() + .ok_or("Multicall3 address not found in contract_addresses.json")? + .to_string(); + let sp_registry_addr = addresses["foc_contracts"]["service_provider_registry_proxy"] + .as_str() + .ok_or("SP Registry address not found in contract_addresses.json")? + .to_string(); + + Ok(( + user_key_prefixed, + warm_storage_addr, + usdfc_addr, + multicall3_addr, + sp_registry_addr, + )) +} + +/// Create a random test file for the E2E test. +fn create_random_test_file(run_dir: &Path) -> Result> { + let random_file_path = run_dir.join("random_test_file.txt"); + let mut file = File::create(&random_file_path)?; + let mut rng = rand::thread_rng(); + let data: Vec = (0..912).map(|_| rng.gen()).collect(); + file.write_all(&data)?; + info!("Created random test file at {}", random_file_path.display()); + Ok(random_file_path) +} + +/// Generate the shell script for synapse-sdk E2E testing. +fn generate_test_script( + lotus_rpc_url: &str, + warm_storage_addr: &str, + multicall3_addr: &str, + usdfc_addr: &str, + sp_registry_addr: &str, +) -> String { + let mut lines = Vec::new(); + lines.extend(bootstrap_commands()); + lines.push(build_post_deploy_command( + lotus_rpc_url, + warm_storage_addr, + multicall3_addr, + usdfc_addr, + sp_registry_addr, + )); + lines.extend(wait_commands()); + lines.push(build_storage_e2e_command()); + + lines.join("\n") +} + +/// Steps to install and build the SDK inside the container. +fn bootstrap_commands() -> Vec { + vec![ + "set -e".to_string(), + "cd /synapse-sdk".to_string(), + "echo \"Installing dependencies...\"".to_string(), + "pnpm install".to_string(), + "".to_string(), + "echo \"Building SDK...\"".to_string(), + "pnpm build".to_string(), + "".to_string(), + ] +} + +/// CLI invocation for post-deploy setup. 
+fn build_post_deploy_command( + lotus_rpc_url: &str, + warm_storage_addr: &str, + multicall3_addr: &str, + usdfc_addr: &str, + sp_registry_addr: &str, +) -> String { + [ + "echo \"Running post-deploy setup...\"".to_string(), + format!( + concat!( + "node utils/post-deploy-setup.js \\\n", + " --mode client \\\n", + " --network devnet \\\n", + " --rpc-url {} \\\n", + " --warm-storage {} \\\n", + " --multicall3 {} \\\n", + " --usdfc {} \\\n", + " --sp-registry {}", + ), + lotus_rpc_url, warm_storage_addr, multicall3_addr, usdfc_addr, sp_registry_addr, + ), + ] + .join("\n") +} + +/// Simple wait between setup and test to allow on-chain activation. +fn wait_commands() -> Vec { + vec![ + format!( + "echo \"Waiting for {} seconds...\"", + POST_DEPLOY_WAIT_SECONDS + ), + format!("sleep {}", POST_DEPLOY_WAIT_SECONDS), + "".to_string(), + ] +} + +/// CLI invocation for the storage E2E test. +/// The script uses environment variables for configuration (set via Docker -e flags). +fn build_storage_e2e_command() -> String { + "echo \"Running storage E2E test...\"\n\ +node utils/example-storage-e2e.js /tmp/random_test_file.txt" + .to_string() +} diff --git a/src/commands/start/usdfc_deploy/deployment.rs b/src/commands/start/usdfc_deploy/deployment.rs index 5e4b36d..a8e54e2 100644 --- a/src/commands/start/usdfc_deploy/deployment.rs +++ b/src/commands/start/usdfc_deploy/deployment.rs @@ -9,6 +9,7 @@ use crate::commands::start::lotus_utils::get_lotus_rpc_url; use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; use std::error::Error; +use std::path::PathBuf; use tracing::{error, info}; /// Deploy MockUSDFC using the Foundry project @@ -17,7 +18,7 @@ pub fn deploy_mock_usdfc_foundry( private_key: &str, lotus_rpc_url: &str, run_id: &str, -) -> Result> { +) -> Result<(String, PathBuf), Box> { info!("Deploying MockUSDFC using Foundry project..."); // Get the contract directory from embedded assets @@ -46,7 +47,6 @@ pub fn deploy_mock_usdfc_foundry( "docker", &[ "run", - "--rm", "-u", "foc-user", "--name", @@ -55,7 +55,7 @@ pub fn deploy_mock_usdfc_foundry( "host", // Use host network to access Lotus RPC on dynamic port "-v", &format!("{}:/workspace", contract_dir.display()), - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "bash", "-c", &deploy_cmd, @@ -96,7 +96,7 @@ pub fn deploy_mock_usdfc_foundry( info!("✓ MockUSDFC deployed at: {}", contract_address); - Ok(contract_address.to_string()) + Ok((contract_address.to_string(), contract_dir)) } /// Perform the MockUSDFC deployment process @@ -119,7 +119,7 @@ pub fn perform_token_deployment( let run_id = context.run_id(); // Deploy MockUSDFC - let mock_usdfc_address = + let (mock_usdfc_address, contract_dir) = deploy_mock_usdfc_foundry(context, &private_key, &lotus_rpc_url, run_id)?; // Store in context @@ -135,6 +135,7 @@ pub fn perform_token_deployment( &mock_usdfc_address, &lotus_rpc_url, run_id, + &contract_dir, )?; info!("✓ MockUSDFC token deployed successfully!"); diff --git a/src/commands/start/usdfc_deploy/foundry_setup.rs b/src/commands/start/usdfc_deploy/foundry_setup.rs index 031429e..d0fb823 100644 --- a/src/commands/start/usdfc_deploy/foundry_setup.rs +++ b/src/commands/start/usdfc_deploy/foundry_setup.rs @@ -6,10 +6,10 @@ use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; use crate::embedded_assets; -use crate::paths::foc_localnet_run_dir; +use crate::paths::foc_devnet_run_dir; use std::error::Error; use std::fs; -use std::path::PathBuf; 
+use std::path::{Path, PathBuf}; use tracing::info; /// Get or create the MockUSDFC project directory from embedded assets @@ -17,7 +17,7 @@ use tracing::info; /// Extracts the embedded MockUSDFC Foundry project to a temporary directory /// and returns the path to that directory. pub fn get_mockusdfc_project_dir(run_id: &str) -> Result> { - let run_dir = foc_localnet_run_dir(run_id); + let run_dir = foc_devnet_run_dir(run_id); let extract_target = run_dir.join("mockusdfc-extract"); // Always clean and re-extract to ensure we have the latest embedded version @@ -45,7 +45,7 @@ pub fn get_mockusdfc_project_dir(run_id: &str) -> Result /// Setup the Foundry project (install dependencies if needed) pub fn setup_foundry_project( context: &SetupContext, - contract_dir: &PathBuf, + contract_dir: &Path, run_id: &str, ) -> Result<(), Box> { let openzeppelin_path = contract_dir.join("lib/openzeppelin-contracts"); @@ -58,19 +58,21 @@ pub fn setup_foundry_project( if !git_dir.exists() { info!("Initializing git repository..."); let key = format!("usdfc_setup_git_init_{}", run_id); + let container_name = format!("foc-{}-usdfc-git-init", run_id); let output = run_and_log_command( "docker", &[ "run", - "--rm", + "--name", + &container_name, "-u", "foc-user", "-v", &format!("{}:/workspace", contract_dir.display()), - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "bash", "-c", - "cd /workspace && git init && git config user.email 'foc@localnet' && git config user.name 'FOC Localnet'", + "cd /workspace && git init && git config user.email 'foc@devnet' && git config user.name 'FOC DevNet'", ], context, &key, @@ -87,16 +89,18 @@ pub fn setup_foundry_project( // Install dependencies let key = format!("usdfc_setup_install_deps_{}", run_id); + let container_name = format!("foc-{}-usdfc-install-deps", run_id); let output = run_and_log_command( "docker", &[ "run", - "--rm", + "--name", + &container_name, "-u", "foc-user", "-v", &format!("{}:/workspace", contract_dir.display()), - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "bash", "-c", "cd /workspace && \ @@ -121,14 +125,18 @@ pub fn setup_foundry_project( // Build contracts info!("Building MockUSDFC contract..."); let key = format!("usdfc_setup_build_{}", run_id); + let container_name = format!("foc-{}-usdfc-build", run_id); let output = run_and_log_command( "docker", &[ "run", - "--rm", + "--name", + &container_name, + "-u", + "foc-user", "-v", &format!("{}:/workspace", contract_dir.display()), - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "bash", "-c", "cd /workspace && forge build", diff --git a/src/commands/start/usdfc_deploy/verification.rs b/src/commands/start/usdfc_deploy/verification.rs index 62733d6..b2c6c57 100644 --- a/src/commands/start/usdfc_deploy/verification.rs +++ b/src/commands/start/usdfc_deploy/verification.rs @@ -2,12 +2,16 @@ //! //! This module handles the verification of deployed MockUSDFC contracts. 
-use super::foundry_setup::get_mockusdfc_project_dir; use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; use std::error::Error; +use std::path::Path; use tracing::{info, warn}; +/// Time to wait for transaction confirmation before verification (in seconds) +const TRANSACTION_CONFIRMATION_WAIT_SECS: u64 = 6; + /// Verify the deployed MockUSDFC contract pub fn verify_mock_usdfc( context: &SetupContext, @@ -15,62 +19,82 @@ pub fn verify_mock_usdfc( contract_address: &str, lotus_rpc_url: &str, run_id: &str, + contract_dir: &Path, ) -> Result<(), Box> { info!("Verifying MockUSDFC contract functions..."); - // Get the contract directory from embedded assets - let contract_dir = get_mockusdfc_project_dir(run_id)?; - // Wait a bit for transaction confirmation info!("Waiting for transaction confirmation..."); - std::thread::sleep(std::time::Duration::from_secs(6)); + std::thread::sleep(std::time::Duration::from_secs( + TRANSACTION_CONFIRMATION_WAIT_SECS, + )); - let verify_cmd = format!( - "cd /workspace && \ - forge script script/Verify.s.sol:VerifyMockUSDFC \ - --rpc-url {} \ - --private-key {} \ - --sig 'run(address)' {} \ - -vv", - lotus_rpc_url, private_key, contract_address - ); + // Retry verification with fixed delay + let verification_result = retry_with_fixed_delay( + || { + let verify_cmd = format!( + "cd /workspace && \ + forge script script/Verify.s.sol:VerifyMockUSDFC \ + --rpc-url {} \ + --private-key {} \ + --sig 'run(address)' {} \ + -vv", + lotus_rpc_url, private_key, contract_address + ); - let key = format!("usdfc_verify_{}", run_id); - let output = run_and_log_command( - "docker", - &[ - "run", - "--rm", - "-u", - "foc-user", - "--network", - "host", - "-v", - &format!("{}:/workspace", contract_dir.display()), - "foc-builder", - "bash", - "-c", - &verify_cmd, - ], - context, - &key, - )?; + let key = format!("usdfc_verify_{}", run_id); + let container_name = format!("foc-{}-usdfc-verify", run_id); + let output = run_and_log_command( + "docker", + &[ + "run", + "--name", + &container_name, + "-u", + "foc-user", + "--network", + "host", + "-v", + &format!("{}:/workspace", contract_dir.display()), + crate::constants::BUILDER_DOCKER_IMAGE, + "bash", + "-c", + &verify_cmd, + ], + context, + &key, + )?; - let _stdout = String::from_utf8_lossy(&output.stdout); - let stderr = String::from_utf8_lossy(&output.stderr); + let _stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); - if !output.status.success() { - warn!("Verification failed"); - if !stderr.is_empty() { - warn!("Error output:"); - for line in stderr.lines() { - warn!("{}", line); + if !output.status.success() { + return Err(format!( + "Verification failed: {}", + if !stderr.is_empty() { + stderr.to_string() + } else { + "Unknown error".to_string() + } + ) + .into()); } + + Ok(()) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + "MockUSDFC contract verification", + ); + + match verification_result { + Ok(_) => { + info!("✓ All contract functions verified"); + } + Err(e) => { + warn!("Contract verification failed after retries: {}", e); + warn!("Continuing despite verification warning"); } - // Don't fail the step, just warn - warn!("Continuing despite verification warning"); - } else { - info!("All contract functions verified"); } Ok(()) diff --git a/src/commands/start/usdfc_funding/funding_operations.rs 
b/src/commands/start/usdfc_funding/funding_operations.rs index 643dc13..0786c9d 100644 --- a/src/commands/start/usdfc_funding/funding_operations.rs +++ b/src/commands/start/usdfc_funding/funding_operations.rs @@ -4,24 +4,29 @@ use crate::commands::start::step::SetupContext; use crate::docker::command_logger::run_and_log_command; +use crate::utils::retry::{retry_with_fixed_delay, DEFAULT_MAX_RETRIES, DEFAULT_RETRY_DELAY_SECS}; use ethers_core::types::U256; use hex; use std::error::Error; use tracing::info; +/// Parameters for MockUSDFC transfer operations +pub struct USDFCTransferParams<'a> { + pub from_private_key: &'a str, + pub to_eth_address: &'a str, + pub amount: &'a str, + pub token_address: &'a str, + pub description: &'a str, + pub nonce: Option, + pub lotus_rpc_url: &'a str, +} + /// Transfer MockUSDFC tokens from one address to another using cast pub fn transfer_mock_usdfc( + params: &USDFCTransferParams, context: &SetupContext, - from_private_key: &str, - _from_eth_address: &str, - to_eth_address: &str, - amount: &str, - token_address: &str, - description: &str, - nonce: Option, - lotus_rpc_url: &str, ) -> Result<(), Box> { - info!("Transferring MockUSDFC tokens: {}...", description); + info!("Transferring MockUSDFC tokens: {}...", params.description); let mut cast_cmd = format!( "cd /workspace && cast send {} \ @@ -29,28 +34,38 @@ pub fn transfer_mock_usdfc( --rpc-url {} \ 'transfer(address,uint256)' {} {} \ --gas-limit 100000000", - token_address, from_private_key, lotus_rpc_url, to_eth_address, amount + params.token_address, + params.from_private_key, + params.lotus_rpc_url, + params.to_eth_address, + params.amount ); // Add nonce if provided - if let Some(nonce_val) = nonce { + if let Some(nonce_val) = params.nonce { cast_cmd.push_str(&format!(" --nonce {}", nonce_val)); } // Debug output // println!("Executing command: {}", cast_cmd); - let key = format!("usdfc_transfer_{}", description.replace(" ", "_")); + let key = format!("usdfc_transfer_{}", params.description.replace(" ", "_")); + let container_name = format!( + "foc-{}-usdfc-transfer-{}", + context.run_id(), + params.description.replace(" ", "-").replace("→", "to") + ); let output = run_and_log_command( "docker", &[ "run", - "--rm", + "--name", + &container_name, "--network", "host", // Use host network to access localhost:1234 "-v", "/tmp:/workspace", - "foc-builder", + crate::constants::BUILDER_DOCKER_IMAGE, "bash", "-c", &cast_cmd, @@ -75,63 +90,75 @@ pub fn check_mock_usdfc_balance( token_address: &str, lotus_rpc_url: &str, ) -> Result> { - // info!("Checking MockUSDFC balance for {}...", eth_address); - - let key = format!("usdfc_balance_check_{}", eth_address); - let output = run_and_log_command( - "docker", - &[ - "run", - "--rm", - "--network", - "host", - "-v", - &format!( - "{}:/workspace", - crate::paths::project_root()? 
- .join("contracts/MockUSDFC") - .display() - ), - "foc-builder", - "bash", - "-c", - &format!( - "cd /workspace && cast call {} \ - --rpc-url {} \ - 'balanceOf(address)' {}", - token_address, lotus_rpc_url, eth_address - ), - ], - context, - &key, - )?; - - if !output.status.success() { - return Err(format!( - "Failed to check balance for {}: {}", - eth_address, - String::from_utf8_lossy(&output.stderr) - ) - .into()); - } - - let balance_hex = String::from_utf8_lossy(&output.stdout).trim().to_string(); - - if balance_hex.is_empty() || balance_hex == "0x" { - return Ok(U256::zero()); - } - - // Remove "0x" prefix if it exists - let hex_str = balance_hex.strip_prefix("0x").unwrap_or(&balance_hex); - - // Decode hex to bytes - let bytes = match hex::decode(hex_str) { - Ok(bytes) => bytes, - Err(e) => return Err(format!("Failed to decode hex string: {}: {}", hex_str, e).into()), - }; - - // Convert bytes to U256 - let balance_u256 = U256::from_big_endian(&bytes); - - Ok(balance_u256) + // Retry balance check with fixed delay + retry_with_fixed_delay( + || { + let key = format!("usdfc_balance_check_{}", eth_address); + let container_name = format!( + "foc-{}-usdfc-balance-check-{}", + context.run_id(), + ð_address[..8] + ); + let output = run_and_log_command( + "docker", + &[ + "run", + "--rm", + "--name", + &container_name, + "--network", + "host", + "-v", + &format!( + "{}:/workspace", + crate::paths::project_root()? + .join("contracts/MockUSDFC") + .display() + ), + crate::constants::BUILDER_DOCKER_IMAGE, + "bash", + "-c", + &format!( + "cd /workspace && cast call {} \ + --rpc-url {} \ + 'balanceOf(address)' {}", + token_address, lotus_rpc_url, eth_address + ), + ], + context, + &key, + )?; + + if !output.status.success() { + return Err(format!( + "Failed to check balance for {}: {}", + eth_address, + String::from_utf8_lossy(&output.stderr) + ) + .into()); + } + + let balance_hex = String::from_utf8_lossy(&output.stdout).trim().to_string(); + + if balance_hex.is_empty() || balance_hex == "0x" { + return Ok(U256::zero()); + } + + // Remove "0x" prefix if it exists + let hex_str = balance_hex.strip_prefix("0x").unwrap_or(&balance_hex); + + // Decode hex to bytes + let bytes = hex::decode(hex_str).map_err(|e| -> Box { + format!("Failed to decode hex string: {}: {}", hex_str, e).into() + })?; + + // Convert bytes to U256 + let balance_u256 = U256::from_big_endian(&bytes); + + Ok(balance_u256) + }, + DEFAULT_MAX_RETRIES, + DEFAULT_RETRY_DELAY_SECS, + &format!("MockUSDFC balance check for {}", eth_address), + ) } diff --git a/src/commands/start/usdfc_funding/key_operations.rs b/src/commands/start/usdfc_funding/key_operations.rs index 3b1433e..be121e8 100644 --- a/src/commands/start/usdfc_funding/key_operations.rs +++ b/src/commands/start/usdfc_funding/key_operations.rs @@ -8,7 +8,7 @@ use std::fs; /// Load addresses from the state/addresses.json file fn load_state_addresses() -> Result, Box> { - let state_file = crate::paths::foc_localnet_keys().join("addresses.json"); + let state_file = crate::paths::foc_devnet_keys().join("addresses.json"); if !state_file.exists() { return Err(format!("State addresses file not found: {}", state_file.display()).into()); } diff --git a/src/commands/start/usdfc_funding/usdfc_funding_step.rs b/src/commands/start/usdfc_funding/usdfc_funding_step.rs index 6d510cb..d287b9a 100644 --- a/src/commands/start/usdfc_funding/usdfc_funding_step.rs +++ b/src/commands/start/usdfc_funding/usdfc_funding_step.rs @@ -4,7 +4,7 @@ //! to user and service provider addresses. 
use super::constants::{token_amount_to_wei, TRANSACTION_CONFIRMATION_WAIT_SECS}; -use super::funding_operations::{check_mock_usdfc_balance, transfer_mock_usdfc}; +use super::funding_operations::{self, check_mock_usdfc_balance, transfer_mock_usdfc}; use super::key_operations::get_user_private_key; use crate::commands::start::step::{SetupContext, Step}; use crate::commands::start::usdfc_funding::key_operations::get_user_eth_address; @@ -121,7 +121,7 @@ impl USDFCFundingStep { let amount_wei_str = amount_wei.to_string(); let amount = *amount_tokens; let pk = deployer_private_key.to_string(); - let from = deployer_mockusdfc_eth.to_string(); + let _from = deployer_mockusdfc_eth.to_string(); let contract = mockusdfc_address.to_string(); let rpc = lotus_rpc_url.to_string(); @@ -135,17 +135,19 @@ impl USDFCFundingStep { info!("Transferring {} USDFC: {}...", amount, description); match transfer_mock_usdfc( + &funding_operations::USDFCTransferParams { + from_private_key: &pk, + to_eth_address: ð_address, + amount: &amount_wei_str, + token_address: &contract, + description: &description, + nonce: Some( + (batch_idx * MAX_CONCURRENT_TRANSFERS + batch_transfer_idx + 1) + as u64, + ), + lotus_rpc_url: &rpc, + }, &context_clone, - &pk, - &from, - ð_address, - &amount_wei_str, - &contract, - &description, - Some( - (batch_idx * MAX_CONCURRENT_TRANSFERS + batch_transfer_idx + 1) as u64, - ), - &rpc, ) { Ok(_) => { info!("Transferred {} USDFC: {}", amount, description); diff --git a/src/commands/start/yugabyte/mod.rs b/src/commands/start/yugabyte/mod.rs index 705a932..dd46151 100644 --- a/src/commands/start/yugabyte/mod.rs +++ b/src/commands/start/yugabyte/mod.rs @@ -1,4 +1,5 @@ use super::step::{SetupContext, Step}; +use crate::constants::YUGABYTE_DOCKER_IMAGE; use crate::docker::command_logger::run_and_log_command; use std::error::Error; use std::path::PathBuf; @@ -12,9 +13,7 @@ use crate::docker::network::pdp_miner_network_name; use crate::docker::{ container_exists, container_is_running, stop_and_remove_container, wait_for_port, }; -use crate::paths::foc_localnet_yugabyte_sp_volume; - -const IMAGE_NAME: &str = "foc-yugabyte"; +use crate::paths::foc_devnet_yugabyte_sp_volume; /// Spawn a single Yugabyte instance (used for parallel spawning). /// @@ -35,7 +34,7 @@ fn spawn_yugabyte_instance( // Create data directory for this instance // This will be mounted to /home/foc-user/yb_base in the container // yugabyted will create subdirectories (data/, conf/, logs/) under this base directory - let data_dir = foc_localnet_yugabyte_sp_volume(run_id, sp_idx); + let data_dir = foc_devnet_yugabyte_sp_volume(run_id, sp_idx); std::fs::create_dir_all(&data_dir)?; // Stop and remove existing container if it exists @@ -93,7 +92,7 @@ fn spawn_yugabyte_instance( ]); // Add image name - docker_args.push(IMAGE_NAME); + docker_args.push(YUGABYTE_DOCKER_IMAGE); // Add YugabyteDB startup command with full configuration // CRITICAL: --base_dir must match the volume mount location @@ -236,14 +235,14 @@ impl Step for YugabyteStep { fn pre_execute(&self, context: &SetupContext) -> Result<(), Box> { // Verify Docker image exists - if !crate::docker::core::image_exists(IMAGE_NAME).unwrap_or(true) { + if !crate::docker::core::image_exists(YUGABYTE_DOCKER_IMAGE).unwrap_or(true) { return Err(format!( - "Docker image '{}' not found. Please run 'foc-localnet init' to build the image.", - IMAGE_NAME + "Docker image '{}' not found. 
Please run 'foc-devnet init' to build the image.", + YUGABYTE_DOCKER_IMAGE ) .into()); } - info!("✓ Docker image '{}' found", IMAGE_NAME); + info!("✓ Docker image '{}' found", YUGABYTE_DOCKER_IMAGE); // Check if ports are available let sp_count = self.active_sp_count; diff --git a/src/commands/status/build_status.rs b/src/commands/status/build_status.rs index 344cf9b..7f9c3f7 100644 --- a/src/commands/status/build_status.rs +++ b/src/commands/status/build_status.rs @@ -1,27 +1,28 @@ //! # Build Status //! -//! This module handles the display of build status information for foc-localnet binaries. +//! This module handles the display of build status information for foc-devnet binaries. //! //! It provides functionality to: //! - Check if expected binaries exist //! - Display build timestamps //! - Show relative time since build -use crate::paths::foc_localnet_bin; +use crate::paths::foc_devnet_bin; use chrono::{DateTime, Utc}; +use std::process::Command; use tracing::info; use super::utils::format_time_ago; /// Print build status of artifacts in tabular format. /// -/// This function displays the build status of all expected foc-localnet binaries, +/// This function displays the build status of all expected foc-devnet binaries, /// including whether they exist, their file sizes, and when they were last built. /// /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::build_status::print_build_status; +/// use foc_devnet::commands::status::build_status::print_build_status; /// /// print_build_status().expect("Failed to print build status"); /// ``` @@ -30,44 +31,63 @@ use super::utils::format_time_ago; /// /// Returns an error if file system operations fail. pub fn print_build_status() -> Result<(), Box> { - info!("Build Status"); - - let bin_dir = foc_localnet_bin(); + let bin_dir = foc_devnet_bin(); // Check for expected binaries - let expected_binaries = vec!["lotus", "lotus-miner", "lotus-shed", "lotus-seed", "curio"]; - - // Print header - info!( - "{:<15} {:<10} {:<40} {:<20}", - "Binary", "Status", "Path", "Time of Build" - ); - info!("{:-<15} {:-<10} {:-<40} {:-<20}", "", "", "", ""); + let expected_binaries = vec![ + "lotus", + "lotus-miner", + "lotus-shed", + "lotus-seed", + "curio", + "pdptool", + "sptool", + ]; for binary in expected_binaries { let binary_path = bin_dir.join(binary); - let (status, path_display, time_display) = if binary_path.exists() { + if binary_path.exists() { let metadata = std::fs::metadata(&binary_path)?; let modified: DateTime = metadata.modified()?.into(); let time_ago = format_time_ago(Utc::now() - modified); - ( - "Ready", - binary_path.display().to_string(), - format!("{} ({})", modified.format("%Y-%m-%d %H:%M"), time_ago), - ) - } else { - ("Missing", "N/A".to_string(), "N/A".to_string()) - }; - info!( - "{:<15} {:<10} {:<40} {:<20}", - binary, status, path_display, time_display - ); + // Get version information + let version = get_binary_version(&binary_path); + + info!( + "Binary: {}: Ready | Built {} ({}) | Version: {}", + binary, + modified.format("%Y-%m-%d %H:%M"), + time_ago, + version + ); + } else { + info!("Binary \"{}\": Missing", binary); + } } Ok(()) } +/// Get the version string for a binary by executing it with --version. +/// +/// Returns the version string or "Unknown" if the command fails. 
+fn get_binary_version(binary_path: &std::path::Path) -> String { + match Command::new(binary_path).arg("--version").output() { + Ok(output) if output.status.success() => { + let stdout = String::from_utf8_lossy(&output.stdout); + // Take first line and clean it up + stdout + .lines() + .next() + .unwrap_or("Unknown") + .trim() + .to_string() + } + _ => "Unknown".to_string(), + } +} + #[cfg(test)] mod tests { use super::*; @@ -75,7 +95,7 @@ mod tests { #[test] fn test_print_build_status() { // This test mainly verifies that the function doesn't panic - // In a real scenario, you'd mock the foc_localnet_bin function + // In a real scenario, you'd mock the foc_devnet_bin function // or set up the environment properly let result = print_build_status(); // We expect this to work even if no binaries exist diff --git a/src/commands/status/code_version.rs b/src/commands/status/code_version.rs index d0aa74f..d91ccad 100644 --- a/src/commands/status/code_version.rs +++ b/src/commands/status/code_version.rs @@ -1,6 +1,6 @@ //! # Code Version Status //! -//! This module handles the display of code version information for the foc-localnet system. +//! This module handles the display of code version information for the foc-devnet system. //! //! It provides functionality to: //! - Display git repository information for Lotus and Curio @@ -8,21 +8,22 @@ //! - Indicate readiness status of code repositories use crate::config::Config; -use crate::paths::foc_localnet_config; -use std::fs; +use crate::paths::foc_devnet_config; +use std::{fs, io::Error}; use tracing::info; use super::git::{format_location_info, get_git_info, get_repo_path_from_config}; /// Print code version information in tabular format. /// -/// This function displays version information for both Lotus and Curio repositories, -/// including their source types, current versions, commit hashes, and readiness status. +/// This function displays version information for Lotus, Curio, Filecoin-Services, +/// and Synapse-SDK repositories, including their source types, current versions, +/// commit hashes, and readiness status. /// /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::code_version::print_code_version; +/// use foc_devnet::commands::status::code_version::print_code_version; /// /// print_code_version().expect("Failed to print code version"); /// ``` @@ -33,10 +34,18 @@ use super::git::{format_location_info, get_git_info, get_repo_path_from_config}; /// - The configuration file cannot be read or parsed /// - Git repository information cannot be retrieved pub fn print_code_version() -> Result<(), Box> { - info!("Code Versions"); - // Load configuration - let config_path = foc_localnet_config(); + let config_path = foc_devnet_config(); + + // Check if config exists + if !config_path.exists() { + return Err(Error::new( + std::io::ErrorKind::Unsupported, + "Configuration not initialized. 
Run 'foc-devnet init' first.", + ) + .into()); + } + let config_content = fs::read_to_string(&config_path) .map_err(|e| format!("Failed to read config file at {:?}: {}", config_path, e))?; let config: Config = toml::from_str(&config_content) @@ -56,23 +65,45 @@ pub fn print_code_version() -> Result<(), Box> { let (curio_source_type, curio_version, curio_commit, curio_status) = format_location_info(&config.curio, &curio_git_info, &curio_repo_path); - // Print header + // Get git information for Filecoin-Services + let fc_services_repo_path = + get_repo_path_from_config(&config.filecoin_services, "filecoin-services"); + let fc_services_git_info = get_git_info(&fc_services_repo_path)?; + + let (fc_services_source_type, fc_services_version, fc_services_commit, fc_services_status) = + format_location_info( + &config.filecoin_services, + &fc_services_git_info, + &fc_services_repo_path, + ); + + // Get git information for Synapse-SDK + let synapse_sdk_repo_path = get_repo_path_from_config(&config.synapse_sdk, "synapse-sdk"); + let synapse_sdk_git_info = get_git_info(&synapse_sdk_repo_path)?; + + let (synapse_sdk_source_type, synapse_sdk_version, synapse_sdk_commit, synapse_sdk_status) = + format_location_info( + &config.synapse_sdk, + &synapse_sdk_git_info, + &synapse_sdk_repo_path, + ); + + // Print log-style output info!( - "{:<15} {:<20} {:<15} {:<15} {:<15}", - "Component", "Source Type", "Version", "Commit", "Status" + "Lotus: [{},{},{}] {}", + lotus_source_type, lotus_version, lotus_commit, lotus_status ); info!( - "{:-<15} {:-<20} {:-<15} {:-<15} {:-<15}", - "", "", "", "", "" + "Curio: [{},{},{}] {}", + curio_source_type, curio_version, curio_commit, curio_status ); - info!( - "{:<15} {:<20} {:<15} {:<15} {:<15}", - "Lotus", lotus_source_type, lotus_version, lotus_commit, lotus_status + "Filecoin-Services: [{},{},{}] {}", + fc_services_source_type, fc_services_version, fc_services_commit, fc_services_status ); info!( - "{:<15} {:<20} {:<15} {:<15} {:<15}", - "Curio", curio_source_type, curio_version, curio_commit, curio_status + "Synapse-SDK: [{},{},{}] {}", + synapse_sdk_source_type, synapse_sdk_version, synapse_sdk_commit, synapse_sdk_status ); Ok(()) diff --git a/src/commands/status/disk_usage.rs b/src/commands/status/disk_usage.rs index dc01828..f5086c8 100644 --- a/src/commands/status/disk_usage.rs +++ b/src/commands/status/disk_usage.rs @@ -1,6 +1,6 @@ //! # Disk Usage //! -//! This module handles the display of disk usage information for foc-localnet directories. +//! This module handles the display of disk usage information for foc-devnet directories. //! //! It provides functionality to: //! - Calculate directory sizes @@ -10,21 +10,21 @@ use tracing::info; use crate::paths::{ - foc_localnet_artifacts, foc_localnet_bin, foc_localnet_code, foc_localnet_docker_volumes, - foc_localnet_home, foc_localnet_logs, foc_localnet_state, foc_localnet_tmp, + foc_devnet_artifacts, foc_devnet_bin, foc_devnet_code, foc_devnet_docker_volumes, + foc_devnet_home, foc_devnet_logs, foc_devnet_state, foc_devnet_tmp, }; use super::utils::{format_size, get_directory_size}; -/// Print disk usage information for foc-localnet directories. +/// Print disk usage information for foc-devnet directories. /// -/// This function displays disk usage statistics for all foc-localnet directories, +/// This function displays disk usage statistics for all foc-devnet directories, /// including code, binaries, logs, state, temporary files, and artifacts. 
/// /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::disk_usage::print_disk_usage; +/// use foc_devnet::commands::status::disk_usage::print_disk_usage; /// /// print_disk_usage().expect("Failed to print disk usage"); /// ``` @@ -35,7 +35,7 @@ use super::utils::{format_size, get_directory_size}; pub fn print_disk_usage() -> Result<(), Box> { info!("Disk Usage"); - let home_dir = foc_localnet_home(); + let home_dir = foc_devnet_home(); // Print header info!("{:<25} {:<10} {:<40}", "Directory", "Size", "Path"); @@ -43,12 +43,12 @@ pub fn print_disk_usage() -> Result<(), Box> { // Main directories let directories = vec![ - ("Home", foc_localnet_home()), - ("Logs", foc_localnet_logs()), - ("State", foc_localnet_state()), - ("Tmp", foc_localnet_tmp()), - ("Code", foc_localnet_code()), - ("Binaries", foc_localnet_bin()), + ("Home", foc_devnet_home()), + ("Logs", foc_devnet_logs()), + ("State", foc_devnet_state()), + ("Tmp", foc_devnet_tmp()), + ("Code", foc_devnet_code()), + ("Binaries", foc_devnet_bin()), ]; for (name, path) in &directories { @@ -64,7 +64,7 @@ pub fn print_disk_usage() -> Result<(), Box> { } // Artifacts breakdown - let artifacts_dir = foc_localnet_artifacts(); + let artifacts_dir = foc_devnet_artifacts(); let artifacts_size = get_directory_size(&artifacts_dir)?; info!( @@ -75,7 +75,7 @@ pub fn print_disk_usage() -> Result<(), Box> { ); // Docker volumes - let docker_volumes_dir = foc_localnet_docker_volumes(); + let docker_volumes_dir = foc_devnet_docker_volumes(); let docker_volumes_size = get_directory_size(&docker_volumes_dir)?; info!( "{:<25} {:<10} {:<40}", @@ -96,7 +96,7 @@ pub fn print_disk_usage() -> Result<(), Box> { // Total size let total_size = get_directory_size(&home_dir)?; - info!("Total foc-localnet size: {}", format_size(total_size)); + info!("Total foc-devnet size: {}", format_size(total_size)); Ok(()) } diff --git a/src/commands/status/git/formatters.rs b/src/commands/status/git/formatters.rs index 915fa1b..20a92e5 100644 --- a/src/commands/status/git/formatters.rs +++ b/src/commands/status/git/formatters.rs @@ -22,9 +22,9 @@ use crate::config::Location; /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::git::formatters::format_location_info; -/// use foc_localnet::commands::status::git::git_info::GitInfo; -/// use foc_localnet::config::{Location, GitBranch}; +/// use foc_devnet::commands::status::git::formatters::format_location_info; +/// use foc_devnet::commands::status::git::git_info::GitInfo; +/// use foc_devnet::config::{Location, GitBranch}; /// use std::path::Path; /// /// let location = Location::GitBranch(GitBranch { diff --git a/src/commands/status/git/git_info.rs b/src/commands/status/git/git_info.rs index b048f42..eeb643e 100644 --- a/src/commands/status/git/git_info.rs +++ b/src/commands/status/git/git_info.rs @@ -28,7 +28,7 @@ pub enum GitInfo { /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::git::git_info::get_git_info; +/// use foc_devnet::commands::status::git::git_info::get_git_info; /// use std::path::Path; /// /// let repo_path = Path::new("/path/to/repo"); diff --git a/src/commands/status/git/repo_paths.rs b/src/commands/status/git/repo_paths.rs index 71c733a..b6fd59e 100644 --- a/src/commands/status/git/repo_paths.rs +++ b/src/commands/status/git/repo_paths.rs @@ -4,7 +4,7 @@ //! based on configuration locations. 
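The disk-usage report above leans on `get_directory_size` from `src/commands/status/utils.rs`, whose body is not part of this diff. A plain recursive walk is one way such a helper might be written; treat this as an illustrative sketch, not the project's implementation.

```rust
// Illustrative recursive directory-size helper; the real get_directory_size
// in src/commands/status/utils.rs may differ (e.g. symlink or error handling).
use std::error::Error;
use std::fs;
use std::path::Path;

pub fn get_directory_size(path: &Path) -> Result<u64, Box<dyn Error>> {
    if !path.exists() {
        return Ok(0);
    }
    let mut total: u64 = 0;
    for entry in fs::read_dir(path)? {
        let entry = entry?;
        let metadata = entry.metadata()?;
        if metadata.is_dir() {
            // Recurse into subdirectories
            total += get_directory_size(&entry.path())?;
        } else {
            total += metadata.len();
        }
    }
    Ok(total)
}
```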
use crate::config::Location; -use crate::paths::foc_localnet_code; +use crate::paths::foc_devnet_code; /// Get the repository path to check for git information based on the config location. /// @@ -14,8 +14,8 @@ use crate::paths::foc_localnet_code; /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::git::repo_paths::get_repo_path_from_config; -/// use foc_localnet::config::{Location, GitBranch}; +/// use foc_devnet::commands::status::git::repo_paths::get_repo_path_from_config; +/// use foc_devnet::config::{Location, GitBranch}; /// /// let location = Location::GitBranch(GitBranch { /// url: "https://github.com/example/repo".to_string(), @@ -35,8 +35,8 @@ pub fn get_repo_path_from_config(location: &Location, component: &str) -> std::p std::path::PathBuf::from(dir) } Location::GitTag { .. } | Location::GitCommit { .. } | Location::GitBranch { .. } => { - // For git sources, check if it exists in the foc-localnet code directory - foc_localnet_code().join(component) + // For git sources, check if it exists in the foc-devnet code directory + foc_devnet_code().join(component) } } } diff --git a/src/commands/status/keys.rs b/src/commands/status/keys.rs index e3f1275..dede443 100644 --- a/src/commands/status/keys.rs +++ b/src/commands/status/keys.rs @@ -1,40 +1,17 @@ -//! Keys status display for foc-localnet. +//! Keys status display for foc-devnet. //! -//! This module displays the generated addresses and private keys -//! for various foc-localnet components. - -use crate::commands::init::keys::load_keys; +//! This module displays the generated addresses and where to find their +//! private keys for various foc-devnet components. +use crate::paths::foc_devnet_keys; use tracing::info; /// Print the keys status information. -/// -/// Displays all generated addresses and their private keys. pub fn print_keys_status() -> Result<(), Box> { - info!("Generated Keys"); - - let keys = load_keys()?; - - for key in keys { - let addr_display = if let Some(addr) = key.filecoin_address.as_ref() { - if addr.starts_with("t3") { - format!("{} (t3)", addr) - } else if addr.starts_with("t4") { - format!("{} (t4)", addr) - } else { - format!("{} (unknown)", addr) - } - } else { - "N/A".to_string() - }; - - info!("{}: {}", key.name, addr_display); - - if let Some(eth) = key.eth_address { - info!("Ethereum: {}", eth); - } - - info!("Private Key: {}", key.private_key); - } + let keys_dir = foc_devnet_keys(); + info!( + "Deterministic Keys and Addresses stored in: {}", + keys_dir.display() + ); Ok(()) } diff --git a/src/commands/status/mod.rs b/src/commands/status/mod.rs index 80d402a..d113569 100644 --- a/src/commands/status/mod.rs +++ b/src/commands/status/mod.rs @@ -1,18 +1,20 @@ //! # Status Module //! -//! This module provides comprehensive status reporting for the FOC LocalNet system. +//! This module provides comprehensive status reporting for the FOC DevNet system. //! //! The status command displays information about: -//! - Code versions and git status for Lotus and Curio repositories +//! - Code versions and git status for repositories (Lotus, Curio, Filecoin-Services, Synapse-SDK) //! - Build status of system binaries +//! - Proof parameters availability and validation //! - Running status of Docker containers and services //! - System uptime information -//! - Disk usage across various directories +//! - Running system details (block height, ports, file locations) when system is active +//! - Generated keys and their file locations //! //! ## Usage //! //! ```rust -//! 
use foc_localnet::commands::status; +//! use foc_devnet::commands::status; //! //! // Display full system status //! status::status()?; @@ -24,33 +26,33 @@ //! The status module is organized into several submodules: //! - `code_version`: Handles repository version and git information display //! - `build_status`: Manages binary build status reporting +//! - `proof_params`: Checks proof parameters availability and validation //! - `running_status`: Reports Docker container and service status //! - `uptime`: Calculates and displays system uptime -//! - `disk_usage`: Provides disk usage statistics +//! - `running_system_info`: Shows detailed info when system is running (ports, block height, files) +//! - `keys`: Displays generated keys and their file locations //! - `git`: Git repository utilities -//! - `docker`: Docker container management utilities //! - `utils`: Common formatting and utility functions pub mod build_status; pub mod code_version; -pub mod disk_usage; pub mod git; pub mod keys; -pub mod running_status; +pub mod proof_params; +pub mod running; +pub mod running_system_info; pub mod uptime; pub mod utils; -use tracing::info; - /// Execute the status command. /// -/// This function displays a pretty-printed status of the foc-localnet system, +/// This function displays a pretty-printed status of the foc-devnet system, /// including code version, build status, running status, and uptime information. /// /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status; +/// use foc_devnet::commands::status; /// /// // Display the current system status /// status::status().expect("Failed to display status"); @@ -64,25 +66,26 @@ use tracing::info; /// - Docker command execution failures /// - File system access problems pub fn status() -> Result<(), Box> { - info!("FOC LocalNet Status"); - // Code version information code_version::print_code_version()?; // Artifacts build status build_status::print_build_status()?; + // Proof parameters status + proof_params::print_proof_params_status()?; + // System running status - running_status::print_running_status()?; + running::print_running_status()?; // Uptime information (if running) uptime::print_uptime()?; + // Running system information (ports, block height, files) - only if system is running + running_system_info::print_running_system_info()?; + // Keys information keys::print_keys_status()?; - // Disk usage information - disk_usage::print_disk_usage()?; - Ok(()) } diff --git a/src/commands/status/proof_params.rs b/src/commands/status/proof_params.rs new file mode 100644 index 0000000..39025f0 --- /dev/null +++ b/src/commands/status/proof_params.rs @@ -0,0 +1,29 @@ +//! Proof parameters status display for foc-devnet. +//! +//! This module displays the proof parameters docker volume status, +//! including availability. + +use crate::paths::foc_devnet_proof_parameters; +use tracing::info; + +/// Print the proof parameters status. +/// +/// Displays whether the proof parameters docker volume is available. +pub fn print_proof_params_status() -> Result<(), Box> { + let params_dir = foc_devnet_proof_parameters(); + + if !params_dir.exists() { + info!("Run 'foc-devnet start' to download proof parameters. 
(first run downloads and caches it for future runs)"); + return Ok(()); + } + + // Check if the directory has content + if params_dir.read_dir()?.next().is_some() { + info!("FilProofParams: OK, at: {}", params_dir.display()); + } else { + info!("FilProofParams: EMPTY, at: {}", params_dir.display()); + info!("Run 'foc-devnet start' to download proof parameters."); + } + + Ok(()) +} diff --git a/src/commands/status/running.rs b/src/commands/status/running.rs new file mode 100644 index 0000000..60a4d5e --- /dev/null +++ b/src/commands/status/running.rs @@ -0,0 +1,207 @@ +//! # Running Status +//! +//! This module handles the display of system running status for foc-devnet services. +//! +//! It provides functionality to: +//! - Check Docker container status +//! - Display service uptime +//! - Show port accessibility +//! - Indicate overall system health + +use tracing::info; + +use crate::docker::core::image_exists; +use crate::docker::status::{ + get_container_ports, get_container_uptime, get_running_foc_containers, +}; +use crate::run_id::load_current_run_id; + +/// Print running status of the system in tabular format. +/// +/// This function displays the status of all expected foc-devnet services, +/// including Docker containers, their uptime, and port accessibility. +/// If a run ID exists, it shows the actual container names with run ID prefix. +/// +/// # Examples +/// +/// ```rust,no_run +/// use foc_devnet::commands::status::running_status::print_running_status; +/// +/// print_running_status().expect("Failed to print running status"); +/// ``` +/// +/// # Errors +/// +/// Returns an error if Docker commands fail. +pub fn print_running_status() -> Result<(), Box> { + // Try to get current run ID + let run_id = load_current_run_id().ok(); + + // Check for running Docker containers + let containers = get_running_foc_containers()?; + + let expected_containers = if let Some(ref id) = run_id { + vec![ + ("Lotus Daemon", format!("foc-{}-lotus", id)), + ("Lotus Miner", format!("foc-{}-lotus-miner", id)), + ("YugabyteDB", format!("foc-{}-yugabyte", id)), + ("Builder", format!("foc-{}-builder", id)), + ] + } else { + vec![ + ( + "Lotus Daemon", + crate::constants::LOTUS_CONTAINER.to_string(), + ), + ( + "Lotus Miner", + crate::constants::LOTUS_MINER_CONTAINER.to_string(), + ), + ("Curio", crate::constants::CURIO_CONTAINER.to_string()), + ( + "YugabyteDB", + crate::constants::YUGABYTE_CONTAINER.to_string(), + ), + ("Builder", crate::constants::BUILDER_CONTAINER.to_string()), + ] + }; + + if let Some(ref id) = run_id { + info!("Run ID: {}", id); + } + + for (service_name, container_name) in &expected_containers { + let is_running = containers.contains(container_name); + let image_name = extract_base_image_name(container_name); + let image_available = image_exists(&image_name).unwrap_or(false); + + if is_running { + let uptime = get_container_uptime(container_name)?; + let ports = format_container_ports(container_name)?; + info!( + "{}: Running Container | Uptime: {} | Ports: {}", + service_name, uptime, ports + ); + } else if !image_available { + info!("{}: Running Container Unavailable", service_name); + } else { + info!("{}: Stopped", service_name); + } + } + + // Check for Curio instances if run ID exists + if let Some(ref id) = run_id { + for sp_idx in 1..=5 { + let curio_container = format!("foc-{}-curio-{}", id, sp_idx); + if containers.contains(&curio_container) { + let uptime = get_container_uptime(&curio_container)?; + let ports = format_container_ports(&curio_container)?; + info!( + 
"Curio SP-{}: Running | Uptime: {} | Ports: {}", + sp_idx, uptime, ports + ); + } + } + } + + Ok(()) +} + +/// Extract base image name from container name. +/// +/// Handles both run-id prefixed names (foc--) and +/// simple names (foc-). +fn extract_base_image_name(container_name: &str) -> String { + if !container_name.starts_with("foc-") { + return container_name.to_string(); + } + + let parts: Vec<&str> = container_name.split('-').collect(); + + // foc-- or foc--- + if parts.len() >= 3 { + // Extract everything after the run_id part + // For foc-26jan02-1058_TizzyTike-lotus -> LOTUS_CONTAINER + // For foc-26jan02-1058_TizzyTike-curio-1 -> CURIO_CONTAINER + let service_parts = &parts[2..]; + + // If it ends with a number (like curio-1), remove it + let ends_with_number = service_parts + .last() + .and_then(|s| s.parse::().ok()) + .is_some(); + + if service_parts.len() >= 2 && ends_with_number { + format!("foc-{}", service_parts[..service_parts.len() - 1].join("-")) + } else { + format!("foc-{}", service_parts.join("-")) + } + } else { + container_name.to_string() + } +} + +/// Format container ports for display. +/// +/// Extracts just the host port numbers from docker port output, +/// filtering out IPv6 bindings and showing only unique ports. +fn format_container_ports(container_name: &str) -> Result> { + let ports_output = get_container_ports(container_name)?; + let output = String::from_utf8_lossy(&ports_output.stdout); + + // Parse lines like "1234/tcp -> 0.0.0.0:5701" and extract just "5701" + let mut ports = Vec::new(); + for line in output.lines() { + if let Some(arrow_pos) = line.find("->") { + let binding = &line[arrow_pos + 2..].trim(); + // Only take IPv4 bindings (0.0.0.0) + if binding.starts_with("0.0.0.0:") { + if let Some(colon_pos) = binding.rfind(':') { + let port = &binding[colon_pos + 1..]; + ports.push(port.to_string()); + } + } + } + } + + if ports.is_empty() { + Ok("N/A".to_string()) + } else { + Ok(ports.join(", ")) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_print_running_status() { + // This test verifies that the function doesn't panic + // In a real environment with Docker, it would check actual containers + let result = print_running_status(); + // We expect this to work even if Docker is not available + // (it will just show empty results) + assert!(result.is_ok()); + } + + #[test] + fn test_extract_base_image_name() { + assert_eq!( + extract_base_image_name("foc-26jan02-1058_TizzyTike-lotus"), + crate::constants::LOTUS_CONTAINER + ); + assert_eq!( + extract_base_image_name("foc-26jan02-1058_TizzyTike-lotus-miner"), + crate::constants::LOTUS_MINER_CONTAINER + ); + assert_eq!( + extract_base_image_name("foc-26jan02-1058_TizzyTike-curio-1"), + crate::constants::CURIO_CONTAINER + ); + assert_eq!( + extract_base_image_name(crate::constants::LOTUS_CONTAINER), + crate::constants::LOTUS_CONTAINER + ); + } +} diff --git a/src/commands/status/running_status.rs b/src/commands/status/running_status.rs deleted file mode 100644 index 293adc0..0000000 --- a/src/commands/status/running_status.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! # Running Status -//! -//! This module handles the display of system running status for foc-localnet services. -//! -//! It provides functionality to: -//! - Check Docker container status -//! - Display service uptime -//! - Show port accessibility -//! 
- Indicate overall system health - -use tracing::{info, warn}; - -use crate::docker::core::image_exists; -use crate::docker::status::{ - get_container_ports, get_container_uptime, get_running_foc_containers, -}; - -/// Print running status of the system in tabular format. -/// -/// This function displays the status of all expected foc-localnet services, -/// including Docker containers, their uptime, and port accessibility. -/// -/// # Examples -/// -/// ```rust,no_run -/// use foc_localnet::commands::status::running_status::print_running_status; -/// -/// print_running_status().expect("Failed to print running status"); -/// ``` -/// -/// # Errors -/// -/// Returns an error if Docker commands fail. -pub fn print_running_status() -> Result<(), Box> { - info!("System Status"); - - // Check for running Docker containers - let containers = get_running_foc_containers()?; - - let expected_containers = vec![ - ("Lotus Daemon", "foc-lotus"), - ("Lotus Miner", "foc-lotus-miner"), - ("Curio", "foc-curio"), - ("YugabyteDB", "foc-yugabyte"), - ("Builder", "foc-builder"), - ]; - - // Print header - info!( - "{:<20} {:<12} {:<20} {:<15} {:<20}", - "Service", "Status", "Container", "Uptime", "Ports" - ); - info!( - "{:-<20} {:-<12} {:-<20} {:-<15} {:-<20}", - "", "", "", "", "" - ); - - let mut all_running = true; - for (service_name, container_name) in &expected_containers { - let is_running = containers.contains(&container_name.to_string()); - let image_available = image_exists(container_name).unwrap_or(false); - - // Determine status based on image availability and running state - let status = if !image_available { - "Unavailable".to_string() - } else if is_running { - "Running".to_string() - } else { - // Don't count builder as "not running" for all_running check - if *container_name != "foc-builder" { - all_running = false; - } - "Stopped".to_string() - }; - - // Get uptime if container is running - let uptime = if is_running { - get_container_uptime(container_name)? - } else { - "N/A".to_string() - }; - - // Get port status if container is running - let port_status = if is_running { - let ports_output = get_container_ports(container_name)?; - String::from_utf8_lossy(&ports_output.stdout) - .trim() - .to_string() - } else { - "N/A".to_string() - }; - - info!( - "{:<20} {:<12} {:<20} {:<15} {:<20}", - service_name, status, container_name, uptime, port_status - ); - } - - if all_running { - info!("All services are running!"); - } else { - warn!("Some services are not running."); - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_print_running_status() { - // This test verifies that the function doesn't panic - // In a real environment with Docker, it would check actual containers - let result = print_running_status(); - // We expect this to work even if Docker is not available - // (it will just show empty results) - assert!(result.is_ok()); - } -} diff --git a/src/commands/status/running_system_info.rs b/src/commands/status/running_system_info.rs new file mode 100644 index 0000000..307064e --- /dev/null +++ b/src/commands/status/running_system_info.rs @@ -0,0 +1,160 @@ +//! Running system information display for foc-devnet. +//! +//! This module displays detailed information about the currently running +//! system, including block height, service ports, and file locations. 
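The running-status code above repeatedly calls `get_running_foc_containers` from `crate::docker::status`, which sits outside this diff. Judging from how the result is used (`containers.contains(...)` with full container names), a sketch of that helper might look like this; the exact filtering and return type are assumptions.

```rust
// Hypothetical sketch of the container-listing helper used by the status code above;
// the real implementation lives in src/docker/status.rs and may differ.
use std::error::Error;
use std::process::Command;

pub fn get_running_foc_containers() -> Result<Vec<String>, Box<dyn Error>> {
    // Only running containers; keep names that follow the foc- prefix convention
    let output = Command::new("docker")
        .args(["ps", "--format", "{{.Names}}"])
        .output()?;
    let names = String::from_utf8_lossy(&output.stdout);
    Ok(names
        .lines()
        .map(str::trim)
        .filter(|name| name.starts_with("foc-"))
        .map(str::to_string)
        .collect())
}
```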
+ +use crate::paths::{contract_addresses_file, foc_metadata_file, step_context_file}; +use crate::run_id::load_current_run_id; +use std::process::Command; +use tracing::{info, warn}; + +/// Print running system information. +/// +/// This displays detailed info when the system is running, including: +/// - Current run ID +/// - Block height +/// - Service ports +/// - File locations for addresses and context dumps +pub fn print_running_system_info() -> Result<(), Box> { + // Try to load current run ID + let run_id = match load_current_run_id() { + Ok(id) => id, + Err(_) => { + // No run ID means system is not running + return Ok(()); + } + }; + + info!("Current Run ID: {}", run_id); + + // Get block height + if let Some(height) = get_lotus_block_height(&run_id) { + info!("Chain Block Height: {}", height); + } else { + warn!("Chain Block Height: Unable to retrieve"); + } + + // Print service ports + print_service_ports(&run_id)?; + + // Print file locations + print_file_locations(&run_id)?; + + Ok(()) +} + +/// Get the current lotus chain block height. +/// +/// This function queries the lotus node to get the current block height of the chain. +fn get_lotus_block_height(run_id: &str) -> Option { + let container_name = format!("foc-{}-lotus", run_id); + + let output = Command::new("docker") + .args([ + "exec", + &container_name, + "/usr/local/bin/lotus-bins/lotus", + "chain", + "list", + "--count=1", + ]) + .output() + .ok()?; + + if !output.status.success() { + return None; + } + + let stdout = String::from_utf8_lossy(&output.stdout); + + // Parse the block height from the first line (format: "HEIGHT: (timestamp) [ ... ]") + for line in stdout.lines() { + if let Some(colon_pos) = line.find(':') { + let height_str = line[..colon_pos].trim(); + if let Ok(height) = height_str.parse::() { + return Some(height); + } + } + } + + None +} + +/// Print service ports for accessing various services. +fn print_service_ports(run_id: &str) -> Result<(), Box> { + // Get port for each service + let services = vec![ + ("Lotus RPC", format!("foc-{}-lotus", run_id), "1234/tcp"), + ("Lotus P2P", format!("foc-{}-lotus", run_id), "1235/tcp"), + ( + "Lotus Miner API", + format!("foc-{}-lotus-miner", run_id), + "2345/tcp", + ), + ]; + + for (service_name, container_name, internal_port) in services { + if let Ok(port) = get_container_port(&container_name, internal_port) { + info!("{}: http://0.0.0.0:{}", service_name, port); + } + } + + // Check for Curio instances + for sp_idx in 1..=5 { + let curio_container = format!("foc-{}-curio-{}", run_id, sp_idx); + if container_exists(&curio_container) { + if let Ok(port) = get_container_port(&curio_container, "12300/tcp") { + info!("Curio SP-{} API: http://0.0.0.0:{}", sp_idx, port); + } + } + } + + Ok(()) +} + +/// Check if a container exists. +fn container_exists(container_name: &str) -> bool { + Command::new("docker") + .args(["inspect", container_name]) + .output() + .map(|output| output.status.success()) + .unwrap_or(false) +} + +/// Get the mapped port for a container's internal port. 
+fn get_container_port( + container_name: &str, + internal_port: &str, +) -> Result> { + let output = Command::new("docker") + .args(["port", container_name, internal_port]) + .output()?; + + if !output.status.success() { + return Err("Failed to get port".into()); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + + // Parse output like "0.0.0.0:1234" + if let Some(colon_pos) = stdout.rfind(':') { + let port = stdout[colon_pos + 1..].trim(); + Ok(port.to_string()) + } else { + Err("Port not mapped".into()) + } +} + +/// Print file locations for addresses and context dumps. +fn print_file_locations(run_id: &str) -> Result<(), Box> { + let contract_addr_file = contract_addresses_file(run_id); + info!("Deployed Addresses: {}", contract_addr_file.display()); + + let foc_meta_file = foc_metadata_file(run_id); + info!("FOC Metadata: {}", foc_meta_file.display()); + + let step_ctx_file = step_context_file(run_id); + info!("Step Context: {}", step_ctx_file.display()); + + Ok(()) +} diff --git a/src/commands/status/uptime.rs b/src/commands/status/uptime.rs index eba6d63..f6761cd 100644 --- a/src/commands/status/uptime.rs +++ b/src/commands/status/uptime.rs @@ -1,6 +1,6 @@ //! # System Uptime //! -//! This module handles the display of system uptime information for foc-localnet. +//! This module handles the display of system uptime information for foc-devnet. //! //! It provides functionality to: //! - Calculate total system uptime based on container start times @@ -22,7 +22,7 @@ use crate::docker::status::{get_container_start_time, get_running_foc_containers /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::uptime::get_lotus_block_height; +/// use foc_devnet::commands::status::uptime::get_lotus_block_height; /// /// if let Some(height) = get_lotus_block_height() { /// println!("Current block height: {}", height); @@ -32,7 +32,7 @@ fn get_lotus_block_height() -> Option { let output = Command::new("docker") .args([ "exec", - "foc-lotus", + crate::constants::LOTUS_CONTAINER, "/usr/local/bin/lotus-bins/lotus", "chain", "list", @@ -59,7 +59,7 @@ fn get_lotus_block_height() -> Option { /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::uptime::get_containers_cpu_usage; +/// use foc_devnet::commands::status::uptime::get_containers_cpu_usage; /// /// if let Some(cpu) = get_containers_cpu_usage() { /// println!("Containers CPU usage: {:.1}%", cpu); @@ -105,7 +105,7 @@ fn get_containers_cpu_usage() -> Option { /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::uptime::get_containers_memory_usage; +/// use foc_devnet::commands::status::uptime::get_containers_memory_usage; /// /// if let Some((used, limit)) = get_containers_memory_usage() { /// println!("Containers memory: {:.1}GB / {:.1}GB", used, limit); @@ -171,13 +171,13 @@ fn parse_memory_value(mem_str: &str) -> Option { /// Print uptime information if system is running. /// -/// This function displays the total uptime of the foc-localnet system by finding +/// This function displays the total uptime of the foc-devnet system by finding /// the oldest running container and calculating how long the system has been running. 
/// /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::uptime::print_uptime; +/// use foc_devnet::commands::status::uptime::print_uptime; /// /// print_uptime().expect("Failed to print uptime"); /// ``` @@ -186,8 +186,6 @@ fn parse_memory_value(mem_str: &str) -> Option { /// /// Returns an error if Docker commands fail. pub fn print_uptime() -> Result<(), Box> { - info!("System Uptime"); - let containers = get_running_foc_containers()?; if containers.is_empty() { diff --git a/src/commands/status/utils.rs b/src/commands/status/utils.rs index 650ab7e..7f1403d 100644 --- a/src/commands/status/utils.rs +++ b/src/commands/status/utils.rs @@ -12,7 +12,7 @@ /// # Examples /// /// ```rust -/// use foc_localnet::commands::status::utils::format_size; +/// use foc_devnet::commands::status::utils::format_size; /// /// assert_eq!(format_size(1024), "1.0 KB"); /// assert_eq!(format_size(1048576), "1.0 MB"); @@ -40,7 +40,7 @@ pub fn format_size(bytes: u64) -> String { /// # Examples /// /// ```rust -/// use foc_localnet::commands::status::utils::format_duration; +/// use foc_devnet::commands::status::utils::format_duration; /// /// assert_eq!(format_duration(3661), "1h 1m 1s"); /// assert_eq!(format_duration(86461), "1d 0h 1m 1s"); @@ -67,7 +67,7 @@ pub fn format_duration(total_seconds: i64) -> String { /// # Examples /// /// ```rust -/// use foc_localnet::commands::status::utils::format_time_ago; +/// use foc_devnet::commands::status::utils::format_time_ago; /// use chrono::{Duration, Utc}; /// /// let duration = Duration::hours(2) + Duration::minutes(30); @@ -97,7 +97,7 @@ pub fn format_time_ago(duration: chrono::Duration) -> String { /// # Examples /// /// ```rust,no_run -/// use foc_localnet::commands::status::utils::get_directory_size; +/// use foc_devnet::commands::status::utils::get_directory_size; /// use std::path::Path; /// /// let size = get_directory_size(Path::new("/tmp")).unwrap(); diff --git a/src/commands/stop.rs b/src/commands/stop.rs index df21b90..b9945e9 100644 --- a/src/commands/stop.rs +++ b/src/commands/stop.rs @@ -6,10 +6,10 @@ use tracing::{info, warn}; /// Container names for all services const CONTAINERS: &[(&str, &str)] = &[ - ("foc-curio", "Curio"), - ("foc-yugabyte", "YugabyteDB"), - ("foc-lotus-miner", "Lotus-Miner"), - ("foc-lotus", "Lotus"), + (crate::constants::CURIO_CONTAINER, "Curio"), + (crate::constants::YUGABYTE_CONTAINER, "YugabyteDB"), + (crate::constants::LOTUS_MINER_CONTAINER, "Lotus-Miner"), + (crate::constants::LOTUS_CONTAINER, "Lotus"), ]; /// Execute the stop command. diff --git a/src/config.rs b/src/config.rs index 8eb599f..34e95cd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,4 @@ -//! Configuration module for foc-localnet. +//! Configuration module for foc-devnet. //! //! This module defines the configuration structures used to manage the local //! Filecoin on-chain cloud cluster. It includes settings for node counts, @@ -114,7 +114,7 @@ impl Location { } } -/// Main configuration structure for the foc-localnet application. +/// Main configuration structure for the foc-devnet application. /// /// This struct contains all the settings needed to configure and run a local /// Filecoin cluster for testing filecoin-onchain-cloud functionality. It includes @@ -123,7 +123,7 @@ impl Location { pub struct Config { /// Starting port number for the contiguous port range. 
/// - /// All ports used by the localnet will be dynamically allocated from a contiguous + /// All ports used by the devnet will be dynamically allocated from a contiguous /// range starting at this port. This ensures no port conflicts and allows /// easy firewall configuration. /// Default: 5700 @@ -133,7 +133,7 @@ pub struct Config { /// /// This defines the size of the port range available for allocation. /// For example, with port_range_start=5700 and port_range_count=300, - /// ports 5700-5999 are reserved for the localnet. + /// ports 5700-5999 are reserved for the devnet. /// Default: 100 pub port_range_count: u16, @@ -200,29 +200,32 @@ impl Default for Config { /// The default configuration sets up a minimal cluster with one of each /// node type and assumes pre-built executables are available in standard /// system locations (/usr/local/bin/). + /// + /// The defaults should always use `GitCommit` or `GitTag` locations to ensure + /// reproducibility. fn default() -> Self { Self { port_range_start: 5700, port_range_count: 100, lotus: Location::GitTag { url: "https://github.com/filecoin-project/lotus.git".to_string(), - tag: "v1.34.0".to_string(), + tag: "v1.34.4-rc1".to_string(), }, - curio: Location::GitBranch { + curio: Location::GitCommit { url: "https://github.com/filecoin-project/curio.git".to_string(), - branch: "pdpv0".to_string(), + commit: "4d53c8017ad345410adfd80794fd7518b49c9128".to_string(), }, - filecoin_services: Location::GitTag { + filecoin_services: Location::GitCommit { url: "https://github.com/FilOzone/filecoin-services.git".to_string(), - tag: "v1.0.0".to_string(), + commit: "0179f8b328c3dc36e81e44677e0078f064975377".to_string(), }, - multicall3: Location::GitBranch { + multicall3: Location::GitTag { url: "https://github.com/mds1/multicall3.git".to_string(), - branch: "main".to_string(), + tag: "v3.1.0".to_string(), }, - synapse_sdk: Location::GitTag { - url: "git@github.com:FilOzone/synapse-sdk.git".to_string(), - tag: "synapse-sdk-v0.36.1".to_string(), + synapse_sdk: Location::GitCommit { + url: "https://github.com/FilOzone/synapse-sdk.git".to_string(), + commit: "773551bf1e9cf4cdc49aeb63a47a81f8dc5cb9e1".to_string(), }, yugabyte_download_url: "https://software.yugabyte.com/releases/2.25.1.0/yugabyte-2.25.1.0-b381-linux-x86_64.tar.gz".to_string(), approved_pdp_sp_count: 1, diff --git a/src/constants.rs b/src/constants.rs index e1596ce..6453232 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,15 +1,23 @@ -//! Constants used throughout the foc-localnet codebase. +//! Constants used throughout the foc-devnet codebase. //! //! This module centralizes all magic numbers, container names, port numbers, //! and other constants to avoid scattering them throughout the codebase. 
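As a quick illustration of the port-range fields documented in `Config` above: with the defaults `port_range_start = 5700` and `port_range_count = 100`, the devnet reserves ports 5700-5799. The allocator below is only a sketch of that relationship; the project's actual port allocation logic is not shown in this diff.

```rust
// Illustrative only: mapping an offset into the reserved contiguous port range.
fn nth_port(port_range_start: u16, port_range_count: u16, offset: u16) -> Option<u16> {
    if offset < port_range_count {
        Some(port_range_start + offset)
    } else {
        None // outside the reserved range
    }
}

fn main() {
    assert_eq!(nth_port(5700, 100, 0), Some(5700));  // first reserved port
    assert_eq!(nth_port(5700, 100, 99), Some(5799)); // last reserved port
    assert_eq!(nth_port(5700, 100, 100), None);      // beyond the range
}
```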
-/// Container names (base - will be prefixed with foc-- in practice) -pub const LOTUS_CONTAINER: &str = "foc-lotus"; -pub const LOTUS_MINER_CONTAINER: &str = "foc-lotus-miner"; -pub const BUILDER_CONTAINER: &str = "foc-builder"; -pub const YUGABYTE_CONTAINER: &str = "foc-yugabyte"; -pub const CURIO_CONTAINER: &str = "foc-curio"; -pub const PORTAINER_CONTAINER: &str = "foc-portainer"; +/// Docker image names +pub const LOTUS_DOCKER_IMAGE: &str = "foc-lotus"; +pub const LOTUS_MINER_DOCKER_IMAGE: &str = "foc-lotus-miner"; +pub const BUILDER_DOCKER_IMAGE: &str = "foc-builder"; +pub const YUGABYTE_DOCKER_IMAGE: &str = "foc-yugabyte"; +pub const CURIO_DOCKER_IMAGE: &str = "foc-curio"; +pub const PORTAINER_DOCKER_IMAGE: &str = "foc-portainer"; + +/// Docker container names (base - will be prefixed with foc-c-- in practice) +pub const LOTUS_CONTAINER: &str = "foc-c-lotus"; +pub const LOTUS_MINER_CONTAINER: &str = "foc-c-lotus-miner"; +pub const BUILDER_CONTAINER: &str = "foc-c-builder"; +pub const YUGABYTE_CONTAINER: &str = "foc-c-yugabyte"; +pub const CURIO_CONTAINER: &str = "foc-c-curio"; +pub const PORTAINER_CONTAINER: &str = "foc-c-portainer"; /// Port numbers pub const LOTUS_RPC_PORT: u16 = 1234; @@ -34,7 +42,7 @@ pub const PORT_CHECK_TIMEOUT_MS: u64 = 5000; pub const MAX_PDP_SP_COUNT: usize = 5; /// Service configuration -pub const SERVICE_NAME: &str = "FOC LocalNet Warm Storage"; +pub const SERVICE_NAME: &str = "FOC DevNet Warm Storage"; pub const SERVICE_DESCRIPTION: &str = "Warm storage service for FOC local development network"; /// Token parameters @@ -43,34 +51,27 @@ pub const MOCK_USDFC_DECIMALS: u8 = 18; pub const MOCK_USDFC_SYMBOL: &str = "USDFC"; pub const MOCK_USDFC_NAME: &str = "Mock USDFC"; -/// Docker image names -pub const LOTUS_IMAGE: &str = "foc-lotus"; -pub const LOTUS_MINER_IMAGE: &str = "foc-lotus-miner"; -pub const BUILDER_IMAGE: &str = "foc-builder"; -pub const YUGABYTE_IMAGE: &str = "foc-yugabyte"; -pub const CURIO_IMAGE: &str = "foc-curio"; - /// Network configuration -pub const LOCAL_NETWORK_CHAIN_ID: u64 = 1414; // Local network chain ID +pub const LOCAL_NETWORK_CHAIN_ID: u64 = 31415926; // Local network chain ID -/// Localnet network parameters (for Lotus, Lotus-Miner, and Curio) -pub const FOC_LOCALNET_BLOCK_DELAY: u64 = 4; // Block delay in seconds -pub const FOC_LOCALNET_PROPAGATION_DELAY: u64 = 2; // Propagation delay in seconds -pub const FOC_LOCALNET_EQUIVOCATION_DELAY: u64 = 0; // Equivocation delay in seconds +/// devnet network parameters (for Lotus, Lotus-Miner, and Curio) +pub const FOC_DEVNET_BLOCK_DELAY: u64 = 4; // Block delay in seconds +pub const FOC_DEVNET_PROPAGATION_DELAY: u64 = 2; // Propagation delay in seconds +pub const FOC_DEVNET_EQUIVOCATION_DELAY: u64 = 0; // Equivocation delay in seconds /// Simple service contract address (zero address) -pub const FOC_LOCALNET_CONTRACT_SIMPLE: &str = "0x0000000000000000000000000000000000000000"; +pub const FOC_DEVNET_CONTRACT_SIMPLE: &str = "0x0000000000000000000000000000000000000000"; /// Environment variable names -pub const ENV_FOC_LOCALNET_CHAIN_ID: &str = "FOC_LOCALNET_CHAIN_ID"; -pub const ENV_FOC_LOCALNET_BLOCK_DELAY: &str = "FOC_LOCALNET_BLOCK_DELAY"; -pub const ENV_FOC_LOCALNET_PROPAGATION_DELAY: &str = "FOC_LOCALNET_PROPAGATION_DELAY"; -pub const ENV_FOC_LOCALNET_EQUIVOCATION_DELAY: &str = "FOC_LOCALNET_EQUIVOCATION_DELAY"; -pub const ENV_FOC_LOCALNET_CONTRACT_PAY: &str = "FOC_CONTRACT_PAY"; -pub const ENV_FOC_LOCALNET_CONTRACT_FWSS: &str = "FOC_CONTRACT_FWSS"; -pub const 
ENV_FOC_LOCALNET_CONTRACT_MULTICALL: &str = "FOC_CONTRACT_MULTICALL"; -pub const ENV_FOC_LOCALNET_CONTRACT_SIMPLE: &str = "FOC_CONTRACT_SIMPLE"; -pub const ENV_FOC_LOCALNET_CONTRACT_USDFC: &str = "FOC_CONTRACT_USDFC"; +pub const ENV_FOC_DEVNET_CHAIN_ID: &str = "FOC_DEVNET_CHAIN_ID"; +pub const ENV_FOC_DEVNET_BLOCK_DELAY: &str = "FOC_DEVNET_BLOCK_DELAY"; +pub const ENV_FOC_DEVNET_PROPAGATION_DELAY: &str = "FOC_DEVNET_PROPAGATION_DELAY"; +pub const ENV_FOC_DEVNET_EQUIVOCATION_DELAY: &str = "FOC_DEVNET_EQUIVOCATION_DELAY"; +pub const ENV_FOC_DEVNET_CONTRACT_PAY: &str = "FOC_CONTRACT_PAY"; +pub const ENV_FOC_DEVNET_CONTRACT_FWSS: &str = "FOC_CONTRACT_FWSS"; +pub const ENV_FOC_DEVNET_CONTRACT_MULTICALL: &str = "FOC_CONTRACT_MULTICALL"; +pub const ENV_FOC_DEVNET_CONTRACT_SIMPLE: &str = "FOC_CONTRACT_SIMPLE"; +pub const ENV_FOC_DEVNET_CONTRACT_USDFC: &str = "FOC_CONTRACT_USDFC"; /// File paths within containers pub const LOTUS_BINARY_PATH: &str = "/usr/local/bin/lotus-bins/lotus"; diff --git a/src/crypto/mnemonic.rs b/src/crypto/mnemonic.rs index d66c485..e928d1b 100644 --- a/src/crypto/mnemonic.rs +++ b/src/crypto/mnemonic.rs @@ -1,12 +1,12 @@ -use crate::paths::foc_localnet_keys; +use crate::paths::foc_devnet_keys; use bip39::{Language, Mnemonic}; use std::fs; /// Save mnemonic to file. /// This is used to persist the generated mnemonic for future key derivation. -/// Default: ~/.foc-localnet/keys/mnemonic.txt +/// Default: ~/.foc-devnet/keys/mnemonic.txt pub fn store_mnemonic(mnemonic: &Mnemonic) -> Result<(), Box> { - let keys_dir = foc_localnet_keys(); + let keys_dir = foc_devnet_keys(); fs::create_dir_all(&keys_dir)?; let mnemonic_file = keys_dir.join("mnemonic.txt"); fs::write(mnemonic_file, mnemonic.to_string())?; @@ -15,9 +15,9 @@ pub fn store_mnemonic(mnemonic: &Mnemonic) -> Result<(), Box Result> { - let keys_dir = foc_localnet_keys(); + let keys_dir = foc_devnet_keys(); let mnemonic_file = keys_dir.join("mnemonic.txt"); let mnemonic_str = fs::read_to_string(mnemonic_file)?; let mnemonic = Mnemonic::parse_in_normalized(Language::English, &mnemonic_str)?; diff --git a/src/docker/build.rs b/src/docker/build.rs index 94edb6d..1cc47bf 100644 --- a/src/docker/build.rs +++ b/src/docker/build.rs @@ -115,9 +115,9 @@ pub fn build_yugabyte_image(name: &str) -> Result<(), Box /// Validate that YugabyteDB artifacts are available for building. fn validate_yugabyte_artifacts() -> Result<(), Box> { - use crate::paths::foc_localnet_artifacts; + use crate::paths::foc_devnet_artifacts; - let artifacts_dir = foc_localnet_artifacts(); + let artifacts_dir = foc_devnet_artifacts(); let yugabyte_dir = artifacts_dir.join("yugabyte"); if !yugabyte_dir.exists() { @@ -132,12 +132,12 @@ fn validate_yugabyte_artifacts() -> Result<(), Box> { /// Perform the actual YugabyteDB image build process. fn perform_yugabyte_build(name: &str, image_tag: &str) -> Result<(), Box> { - use crate::paths::foc_localnet_artifacts; + use crate::paths::foc_devnet_artifacts; let dockerfile_content = embedded_assets::get_dockerfile(name) .ok_or_else(|| format!("Embedded Dockerfile not found for: {}", name))?; - let artifacts_dir = foc_localnet_artifacts(); + let artifacts_dir = foc_devnet_artifacts(); print_yugabyte_build_info(image_tag, &artifacts_dir); @@ -233,14 +233,14 @@ fn finalize_build_progress( } } -/// Build and cache all required Docker images for foc-localnet. +/// Build and cache all required Docker images for foc-devnet. 
/// /// This function builds the following images from embedded Dockerfiles: -/// - foc-builder (Foundry tools) -/// - foc-lotus (Filecoin daemon) -/// - foc-lotus-miner (Filecoin miner) -/// - foc-yugabyte (Database) -/// - foc-curio (Second-generation miner) +/// - BUILDER_DOCKER_IMAGE (Foundry tools) +/// - LOTUS_DOCKER_IMAGE (Filecoin daemon) +/// - LOTUS_MINER_DOCKER_IMAGE (Filecoin miner) +/// - YUGABYTE_DOCKER_IMAGE (Database) +/// - CURIO_DOCKER_IMAGE (Second-generation miner) pub fn build_and_cache_docker_images() -> Result<(), Box> { info!("Building and caching Docker images..."); diff --git a/src/docker/core.rs b/src/docker/core.rs index 53d9443..382e29d 100644 --- a/src/docker/core.rs +++ b/src/docker/core.rs @@ -1,7 +1,7 @@ //! Core Docker utilities and abstractions. //! //! This module provides the fundamental Docker operations and shell command abstractions -//! used throughout foc-localnet. It consolidates the functionality from the old docker.rs +//! used throughout foc-devnet. It consolidates the functionality from the old docker.rs //! and shell.rs modules into a single, well-organized structure. use std::error::Error; @@ -22,11 +22,19 @@ pub fn run_command(program: &str, args: &[&str]) -> Result 200 { + format!("{}... (truncated)", &cmd_str[..200]) + } else { + cmd_str + }; + return Err(format!( - "Command failed: {} {} -> {}", - program, - args.join(" "), - stderr + "Command failed: {} {}\nSTDOUT:\n{}\nSTDERR:\n{}", + program, cmd_display, stdout, stderr ) .into()); } @@ -44,6 +52,18 @@ pub fn docker_command(args: &[&str]) -> Result> { run_command("docker", args) } +/// Get logs from a Docker container. +/// +/// # Arguments +/// * `container_name` - The name of the container to get logs from +/// +/// # Returns +/// The container logs on success. +pub fn get_container_logs(container_name: &str) -> Result> { + let output = docker_command(&["logs", container_name])?; + Ok(String::from_utf8_lossy(&output.stdout).to_string()) +} + /// Check if a port is available (not in use) pub fn is_port_available(port: u16) -> bool { TcpListener::bind(format!("127.0.0.1:{}", port)).is_ok() @@ -52,7 +72,7 @@ pub fn is_port_available(port: u16) -> bool { /// Check if a Docker image exists locally. /// /// # Arguments -/// * `image_name` - The image name to check (e.g., "foc-lotus") +/// * `image_name` - The image name to check (e.g., LOTUS_DOCKER_IMAGE) /// /// # Returns /// true if the image exists, false otherwise. diff --git a/src/docker/init.rs b/src/docker/init.rs index 0d1aea6..45c3e92 100644 --- a/src/docker/init.rs +++ b/src/docker/init.rs @@ -1,14 +1,14 @@ //! Docker initialization utilities. //! //! This module provides functions for initializing Docker volumes, -//! containers, and other setup tasks required for foc-localnet. +//! containers, and other setup tasks required for foc-devnet. use crate::docker::core::{ chown_command, container_exists, copy_from_container, create_container, get_current_gid, get_current_uid, image_exists, }; use crate::embedded_assets; -use crate::paths::foc_localnet_docker_volumes; +use crate::paths::foc_devnet_docker_volumes; use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; @@ -20,7 +20,7 @@ struct VolumesMap { /// Create volume directories for all Docker images. 
pub fn create_volume_directories_for_images() -> Result<(), Box> { - let volumes_base_dir = foc_localnet_docker_volumes(); + let volumes_base_dir = foc_devnet_docker_volumes(); let volume_map_names = ["builder", "curio", "lotus-miner", "lotus", "yugabyte"]; for image_name in volume_map_names { diff --git a/src/docker/logs.rs b/src/docker/logs.rs new file mode 100644 index 0000000..4d13baa --- /dev/null +++ b/src/docker/logs.rs @@ -0,0 +1,134 @@ +//! Docker log collection and cleanup utilities. +//! +//! This module provides functions to persist logs from all containers +//! whose images start with the prefix "foc" and to remove any dead +//! containers after a start attempt, regardless of success or failure. +//! +//! The log files are stored under the run-specific directory: +//! ~/.foc-devnet/run//logs/..docker.log + +use crate::docker::core::{docker_command, get_container_logs}; +use crate::paths::foc_devnet_run_dir; +use std::error::Error; +use std::fs; +use std::path::PathBuf; +use tracing::{info, warn}; + +/// Information about a Docker container for logging and cleanup. +#[derive(Debug, Clone)] +pub struct ContainerInfo { + pub name: String, + pub image: String, + pub status: String, +} + +/// List all containers (running or stopped) whose image name starts with the given prefix. +pub fn list_containers_by_image_prefix(prefix: &str) -> Result, Box> { + let output = docker_command(&["ps", "-a", "--format", "{{.Names}}|{{.Image}}|{{.Status}}"])?; + let stdout = String::from_utf8_lossy(&output.stdout); + + let mut result = Vec::new(); + for line in stdout.lines() { + let parts: Vec<&str> = line.split('|').collect(); + if parts.len() >= 3 { + let name = parts[0].trim().to_string(); + let image = parts[1].trim().to_string(); + let status = parts[2].trim().to_string(); + if image.starts_with(prefix) { + result.push(ContainerInfo { + name, + image, + status, + }); + } + } + } + Ok(result) +} + +/// Persist logs for all containers whose image starts with "foc" under the run logs directory. +pub fn persist_foc_container_logs(run_id: &str) -> Result<(), Box> { + let containers = list_containers_by_image_prefix("foc")?; + let logs_dir = foc_devnet_run_dir(run_id).join("logs"); + fs::create_dir_all(&logs_dir)?; + + info!( + "Persisting logs for {} foc* containers to {}", + containers.len(), + logs_dir.display() + ); + + for c in containers { + let safe_image = c.image.replace(':', "_"); + let file_path = logs_dir.join(format!("{}.{}.docker.log", c.name, safe_image)); + let content = match get_container_logs(&c.name) { + Ok(logs) => { + info!("✓ Captured logs for container '{}'", c.name); + logs + } + Err(e) => { + warn!("Failed to get logs for container '{}': {}", c.name, e); + format!("Failed to get logs for container '{}': {}\n", c.name, e) + } + }; + fs::write(&file_path, content)?; + } + info!("✓ All container logs persisted"); + Ok(()) +} + +/// Remove all containers whose image starts with "foc" and are not running. 
+pub fn remove_dead_foc_containers() -> Result<(), Box> { + let containers = list_containers_by_image_prefix("foc")?; + let mut removed_count = 0; + + for c in containers { + // Heuristic: remove if status contains "Exited" or "Dead" or "Created" + let status_lower = c.status.to_lowercase(); + let is_dead = status_lower.contains("exited") + || status_lower.contains("dead") + || status_lower.contains("created") + || status_lower.contains("removing") + || status_lower.contains("paused"); + if is_dead { + // Best-effort remove; ignore errors so cleanup continues + match docker_command(&["rm", &c.name]) { + Ok(_) => { + info!( + "✓ Removed dead container: {} (status: {})", + c.name, c.status + ); + removed_count += 1; + } + Err(e) => { + warn!("Failed to remove container '{}': {}", c.name, e); + } + } + } + } + info!("✓ Removed {} dead foc* containers", removed_count); + Ok(()) +} + +/// Write the output of `foc-devnet status` to the run's post-start status log file. +pub fn write_post_start_status_log(run_id: &str) -> Result> { + let run_dir = foc_devnet_run_dir(run_id); + fs::create_dir_all(&run_dir)?; + let status_file = run_dir.join("post_start_status.log"); + + info!("Writing post-start status to: {}", status_file.display()); + + let exe = std::env::current_exe()?; + let output = std::process::Command::new(exe).arg("status").output()?; + + let mut content = String::new(); + content.push_str(&String::from_utf8_lossy(&output.stdout)); + if !output.status.success() { + content.push_str("\n[status command failed]\n"); + content.push_str(&String::from_utf8_lossy(&output.stderr)); + } + + fs::write(&status_file, content)?; + info!("✓ Post-start status logged"); + Ok(status_file) +} diff --git a/src/docker/mod.rs b/src/docker/mod.rs index bf94881..900e88c 100644 --- a/src/docker/mod.rs +++ b/src/docker/mod.rs @@ -1,4 +1,4 @@ -//! Docker utilities and abstractions for foc-localnet. +//! Docker utilities and abstractions for foc-devnet. //! //! This module consolidates all Docker-related functionality into a single, //! well-organized structure. It replaces the old scattered docker.rs and shell.rs @@ -9,6 +9,7 @@ pub mod command_logger; pub mod containers; pub mod core; pub mod init; +pub mod logs; pub mod network; pub mod portainer; pub mod shell; @@ -34,6 +35,10 @@ pub use containers::{ portainer_container_name, yugabyte_container_name, }; pub use init::{create_volume_directories_for_images, set_volume_ownership}; +pub use logs::{ + list_containers_by_image_prefix, persist_foc_container_logs, remove_dead_foc_containers, + write_post_start_status_log, +}; pub use network::{ connect_container_to_network, create_all_networks, delete_all_networks, lotus_miner_network_name, lotus_network_name, pdp_miner_network_name, diff --git a/src/docker/portainer.rs b/src/docker/portainer.rs index 8ab9c11..cd02ce9 100644 --- a/src/docker/portainer.rs +++ b/src/docker/portainer.rs @@ -119,7 +119,7 @@ pub fn start_portainer(run_id: &str, port: u16) -> Result<(), Box> { pub fn stop_portainer(run_id: &str) -> Result<(), Box> { let container_name = portainer_container_name(run_id); - println!("{}", "Stopping Portainer..."); + println!("Stopping Portainer..."); if !container_exists(&container_name)? { println!(" Portainer container does not exist"); diff --git a/src/docker/shell.rs b/src/docker/shell.rs index f8c0a82..cb4e24c 100644 --- a/src/docker/shell.rs +++ b/src/docker/shell.rs @@ -1,34 +1,38 @@ //! High-level shell command abstractions. //! //! 
This module provides high-level abstractions for blockchain-related shell commands -//! like Lotus, Forge, Cast, and other tools used in foc-localnet. +//! like Lotus, Forge, Cast, and other tools used in foc-devnet. use crate::docker::core::{docker_command, exec_in_container}; use std::error::Error; use std::process::Output; -/// Execute a lotus command inside the foc-lotus container. +/// Execute a lotus command inside the LOTUS_CONTAINER container. pub fn lotus_command(args: &[&str]) -> Result> { - exec_in_container("foc-lotus", "/usr/local/bin/lotus-bins/lotus", args) + exec_in_container( + crate::constants::LOTUS_CONTAINER, + "/usr/local/bin/lotus-bins/lotus", + args, + ) } -/// Execute a lotus-miner command inside the foc-lotus-miner container. +/// Execute a lotus-miner command inside the LOTUS_MINER_CONTAINER container. pub fn lotus_miner_command(args: &[&str]) -> Result> { exec_in_container( - "foc-lotus-miner", + crate::constants::LOTUS_MINER_CONTAINER, "/usr/local/bin/lotus-bins/lotus-miner", args, ) } -/// Execute a forge command inside the foc-builder container. +/// Execute a forge command inside the BUILDER_CONTAINER container. pub fn forge_command(args: &[&str]) -> Result> { - exec_in_container("foc-builder", "forge", args) + exec_in_container(crate::constants::BUILDER_CONTAINER, "forge", args) } -/// Execute a cast command inside the foc-builder container. +/// Execute a cast command inside the BUILDER_CONTAINER container. pub fn cast_command(args: &[&str]) -> Result> { - exec_in_container("foc-builder", "cast", args) + exec_in_container(crate::constants::BUILDER_CONTAINER, "cast", args) } /// Execute a lotus wallet command. @@ -89,20 +93,21 @@ pub fn lotus_get_eth_address(f4_address: &str) -> Result> } /// Run a docker container with host networking. +/// Note: Does not use --rm flag to allow log inspection after container exits. pub fn docker_run_host_network(image: &str, args: &[&str]) -> Result> { - let mut full_args = vec!["run", "--rm", "--network", "host"]; + let mut full_args = vec!["run", "--network", "host"]; full_args.extend_from_slice(args); full_args.extend_from_slice(&["-i", image]); docker_command(&full_args) } -/// Run a docker container with volume mounts. +/// Note: Does not use --rm flag to allow log inspection after container exits. pub fn docker_run_with_volumes( image: &str, volumes: &[&str], args: &[&str], ) -> Result> { - let mut full_args = vec!["run", "--rm"]; + let mut full_args = vec!["run"]; for volume in volumes { full_args.push("-v"); full_args.push(volume); @@ -112,18 +117,22 @@ pub fn docker_run_with_volumes( docker_command(&full_args) } -/// Execute a bash command inside the foc-builder container. +/// Execute a bash command inside the BUILDER_CONTAINER container. pub fn foc_builder_bash_command(command: &str) -> Result> { - exec_in_container("foc-builder", "bash", &["-c", command]) + exec_in_container( + crate::constants::BUILDER_CONTAINER, + "bash", + &["-c", command], + ) } -/// Execute a forge build command in the foc-builder container. +/// Execute a forge build command in the BUILDER_CONTAINER container. pub fn forge_build_in_container(working_dir: &str) -> Result> { let command = format!("cd {} && forge build", working_dir); foc_builder_bash_command(&command) } -/// Execute a forge script command in the foc-builder container. +/// Execute a forge script command in the BUILDER_CONTAINER container. 
pub fn forge_script_deploy( script_path: &str, rpc_url: &str, diff --git a/src/embedded_assets.rs b/src/embedded_assets.rs index 33152ab..088ad14 100644 --- a/src/embedded_assets.rs +++ b/src/embedded_assets.rs @@ -1,4 +1,4 @@ -//! Embedded assets for foc-localnet. +//! Embedded assets for foc-devnet. //! //! This module contains all external files embedded into the binary //! using include_bytes! to make the binary self-contained. diff --git a/src/lib.rs b/src/lib.rs index 5a5fb99..1a603c4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -//! foc-localnet library crate +//! foc-devnet library crate //! //! This crate provides the core functionality for managing local Filecoin //! on-chain cloud clusters for testing purposes. @@ -16,4 +16,5 @@ pub mod paths; pub mod poison; pub mod port_allocator; pub mod run_id; +pub mod utils; pub mod version_info; diff --git a/src/logger.rs b/src/logger.rs index 6d3fdcf..095e99e 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,4 +1,4 @@ -use crate::paths::{foc_localnet_run_dir, foc_localnet_run_log_file, foc_localnet_state_latest}; +use crate::paths::{foc_devnet_run_dir, foc_devnet_run_log_file, foc_devnet_state_latest}; use std::fs; use std::os::unix::fs::symlink; use std::path::Path; @@ -12,10 +12,10 @@ use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, Env /// /// It also updates the `state/latest` symlink to point to the current run directory. pub fn init_logging(run_id: &str) -> Result<(), Box> { - let run_dir = foc_localnet_run_dir(run_id); + let run_dir = foc_devnet_run_dir(run_id); fs::create_dir_all(&run_dir)?; - let log_file_path = foc_localnet_run_log_file(run_id); + let log_file_path = foc_devnet_run_log_file(run_id); let log_file = fs::File::create(log_file_path)?; let file_layer = fmt::layer().with_ansi(false).with_writer(log_file); @@ -39,7 +39,7 @@ pub fn init_logging(run_id: &str) -> Result<(), Box> { } fn update_latest_symlink(run_dir: &Path) -> Result<(), Box> { - let latest = foc_localnet_state_latest(); + let latest = foc_devnet_state_latest(); // Remove existing symlink or directory if it exists if latest.exists() || latest.is_symlink() { diff --git a/src/main.rs b/src/main.rs index 3aed456..d5ffc5f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,13 +1,12 @@ -//! Main entry point for foc-localnet. +//! Main entry point for foc-devnet. //! //! This module provides the main application entry point with command routing. 
use clap::Parser; -use foc_localnet::cli::{Cli, Commands}; -use foc_localnet::logger::init_logging; -use foc_localnet::poison; -use foc_localnet::run_id::generate_run_id; -use tracing::error; +use foc_devnet::cli::{Cli, Commands}; +use foc_devnet::logger::init_logging; +use foc_devnet::poison; +use foc_devnet::run_id::generate_run_id; mod main_app; @@ -42,6 +41,7 @@ fn main() -> Result<(), Box> { proof_params_dir, force, rand, + no_docker_build, } => main_app::command_handlers::handle_init( curio, lotus, @@ -52,6 +52,7 @@ fn main() -> Result<(), Box> { proof_params_dir, force, rand, + no_docker_build, ), Commands::Build { build_command } => { main_app::command_handlers::handle_build(build_command) @@ -67,13 +68,6 @@ fn main() -> Result<(), Box> { poison::remove_poison()?; Ok(()) } - Err(e) => { - // Leave poison file in place on error - error!( - "Command failed, poison file left in place for safety: {}", - e - ); - Err(e) - } + Err(e) => Err(e), } } diff --git a/src/main_app/command_handlers.rs b/src/main_app/command_handlers.rs index d8a0b4a..95fdd6e 100644 --- a/src/main_app/command_handlers.rs +++ b/src/main_app/command_handlers.rs @@ -4,12 +4,13 @@ use std::fs; -use foc_localnet::cli::BuildCommands; -use foc_localnet::commands; -use foc_localnet::commands::build::Project; -use foc_localnet::config::Config; -use foc_localnet::paths::foc_localnet_config; -use foc_localnet::poison; +use foc_devnet::cli::BuildCommands; +use foc_devnet::commands; +use foc_devnet::commands::build::Project; +use foc_devnet::commands::init::InitOptions; +use foc_devnet::config::Config; +use foc_devnet::paths::foc_devnet_config; +use foc_devnet::poison; /// Execute the start command pub fn handle_start( @@ -30,6 +31,7 @@ pub fn handle_stop() -> Result<(), Box> { } /// Execute the init command +#[allow(clippy::too_many_arguments)] pub fn handle_init( curio: Option, lotus: Option, @@ -40,19 +42,21 @@ pub fn handle_init( proof_params_dir: Option, force: bool, rand: bool, + no_docker_build: bool, ) -> Result<(), Box> { poison::create_poison("Init")?; - commands::init_environment( - curio, - lotus, - filecoin_services, - synapse_sdk, + commands::init_environment(InitOptions { + curio_location: curio, + lotus_location: lotus, + filecoin_services_location: filecoin_services, + synapse_sdk_location: synapse_sdk, yugabyte_url, yugabyte_archive, proof_params_dir, force, - rand, - ) + use_random_mnemonic: rand, + no_docker_build, + }) } /// Execute the build command @@ -60,7 +64,7 @@ pub fn handle_build(build_command: BuildCommands) -> Result<(), Box Result<(), Box> { "-dirty" }; - info!("foc-localnet {}", version_info.version); + info!("foc-devnet {}", version_info.version); info!("Commit: {}{}", version_info.commit, dirty_suffix); info!("Branch: {}", version_info.branch); @@ -38,5 +39,37 @@ pub fn handle_version() -> Result<(), Box> { version_info.build_time_utc, relative_time ); info!("Built (Local): {}", version_info.build_time_local); + + // Print default configuration values + let default_config = Config::default(); + info!(""); + print_location_info("default:code:lotus", &default_config.lotus); + print_location_info("default:code:curio", &default_config.curio); + print_location_info( + "default:code:filecoin-services", + &default_config.filecoin_services, + ); + print_location_info("default:code:multicall3", &default_config.multicall3); + print_location_info("default:code:synapse-sdk", &default_config.synapse_sdk); + info!("default:yugabyte: {}", default_config.yugabyte_download_url); + Ok(()) } + +/// Print 
location information in a formatted way +fn print_location_info(label: &str, location: &Location) { + match location { + Location::LocalSource { dir } => { + info!("{}: local source at {}", label, dir); + } + Location::GitCommit { url, commit } => { + info!("{}: {}, commit {}", label, url, commit); + } + Location::GitTag { url, tag } => { + info!("{}: {}, tag {}", label, url, tag); + } + Location::GitBranch { url, branch } => { + info!("{}: {}, branch {}", label, url, branch); + } + } +} diff --git a/src/paths.rs b/src/paths.rs index e30376f..46f6a54 100644 --- a/src/paths.rs +++ b/src/paths.rs @@ -1,219 +1,219 @@ use std::path::PathBuf; -/// Returns the path to the foc-localnet home directory, e.g., ~/.foc-localnet -pub fn foc_localnet_home() -> PathBuf { +/// Returns the path to the foc-devnet home directory, e.g., ~/.foc-devnet +pub fn foc_devnet_home() -> PathBuf { dirs::home_dir() .unwrap_or_else(|| PathBuf::from("/tmp")) - .join(".foc-localnet") + .join(".foc-devnet") } -/// Returns the path to the foc-localnet logs directory, e.g., ~/.foc-localnet/logs -pub fn foc_localnet_logs() -> PathBuf { - foc_localnet_home().join("logs") +/// Returns the path to the foc-devnet logs directory, e.g., ~/.foc-devnet/logs +pub fn foc_devnet_logs() -> PathBuf { + foc_devnet_home().join("logs") } -/// Returns the path to the foc-localnet tmp directory, e.g., ~/.foc-localnet/tmp -pub fn foc_localnet_tmp() -> PathBuf { - foc_localnet_home().join("tmp") +/// Returns the path to the foc-devnet tmp directory, e.g., ~/.foc-devnet/tmp +pub fn foc_devnet_tmp() -> PathBuf { + foc_devnet_home().join("tmp") } -/// Returns the path to the foc-localnet runs directory, e.g., ~/.foc-localnet/run -pub fn foc_localnet_runs() -> PathBuf { - foc_localnet_home().join("run") +/// Returns the path to the foc-devnet runs directory, e.g., ~/.foc-devnet/run +pub fn foc_devnet_runs() -> PathBuf { + foc_devnet_home().join("run") } /// Returns the path to a specific run directory -/// e.g., ~/.foc-localnet/run/20231218_123456 -pub fn foc_localnet_run_dir(run_id: &str) -> PathBuf { - foc_localnet_runs().join(run_id) +/// e.g., ~/.foc-devnet/run/20231218_123456 +pub fn foc_devnet_run_dir(run_id: &str) -> PathBuf { + foc_devnet_runs().join(run_id) } /// Returns the path to the execution log for a specific run -pub fn foc_localnet_run_log_file(run_id: &str) -> PathBuf { - foc_localnet_run_dir(run_id).join("setup.log") +pub fn foc_devnet_run_log_file(run_id: &str) -> PathBuf { + foc_devnet_run_dir(run_id).join("setup.log") } /// Returns the path to the version file for a specific run -pub fn foc_localnet_run_version_file(run_id: &str) -> PathBuf { - foc_localnet_run_dir(run_id).join("version.txt") +pub fn foc_devnet_run_version_file(run_id: &str) -> PathBuf { + foc_devnet_run_dir(run_id).join("version.txt") } /// Returns the path to the contract addresses file for a specific run pub fn contract_addresses_file(run_id: &str) -> PathBuf { - foc_localnet_run_dir(run_id).join("contract_addresses.json") + foc_devnet_run_dir(run_id).join("contract_addresses.json") } -/// Returns the path to the foc-localnet bin directory, e.g., ~/.foc-localnet/bin -pub fn foc_localnet_bin() -> PathBuf { - foc_localnet_home().join("bin") +/// Returns the path to the foc-devnet bin directory, e.g., ~/.foc-devnet/bin +pub fn foc_devnet_bin() -> PathBuf { + foc_devnet_home().join("bin") } -/// Returns the path to the foc-localnet state directory, e.g., ~/.foc-localnet/state -pub fn foc_localnet_state() -> PathBuf { - foc_localnet_home().join("state") +/// 
Returns the path to the foc-devnet state directory, e.g., ~/.foc-devnet/state +pub fn foc_devnet_state() -> PathBuf { + foc_devnet_home().join("state") } -/// Returns the path to the latest run symlink, e.g., ~/.foc-localnet/state/latest -pub fn foc_localnet_state_latest() -> PathBuf { - foc_localnet_state().join("latest") +/// Returns the path to the latest run symlink, e.g., ~/.foc-devnet/state/latest +pub fn foc_devnet_state_latest() -> PathBuf { + foc_devnet_state().join("latest") } -/// Returns the path to the foc-localnet keys directory, e.g., ~/.foc-localnet/keys -pub fn foc_localnet_keys() -> PathBuf { - foc_localnet_home().join("keys") +/// Returns the path to the foc-devnet keys directory, e.g., ~/.foc-devnet/keys +pub fn foc_devnet_keys() -> PathBuf { + foc_devnet_home().join("keys") } -/// Returns the path to the poison file, e.g., ~/.foc-localnet/state/.poison +/// Returns the path to the poison file, e.g., ~/.foc-devnet/state/.poison pub fn poison_file() -> PathBuf { - foc_localnet_state().join(".poison") + foc_devnet_state().join(".poison") } /// Returns the path to the FOC metadata file for a specific run pub fn foc_metadata_file(run_id: &str) -> PathBuf { - foc_localnet_run_dir(run_id).join("foc_metadata.json") + foc_devnet_run_dir(run_id).join("foc_metadata.json") } /// Returns the path to the step context file for a specific run pub fn step_context_file(run_id: &str) -> PathBuf { - foc_localnet_run_dir(run_id).join("step_context.json") + foc_devnet_run_dir(run_id).join("step_context.json") } /// Returns the path to the PDP_SP_X provider ID file for a specific run pub fn pdp_sp_provider_id_file(run_id: &str, sp_idx: usize) -> PathBuf { - foc_localnet_run_dir(run_id) + foc_devnet_run_dir(run_id) .join("pdp_sps") .join(format!("{}.provider_id.json", sp_idx)) } -/// Returns the path to the foc-localnet remote pulls directory, e.g., ~/.foc-localnet/remote-pulls -pub fn foc_localnet_code() -> PathBuf { - foc_localnet_home().join("code") +/// Returns the path to the foc-devnet remote pulls directory, e.g., ~/.foc-devnet/remote-pulls +pub fn foc_devnet_code() -> PathBuf { + foc_devnet_home().join("code") } /// Returns the path to the "lotus" repository -pub fn foc_localnet_lotus_repo() -> PathBuf { - foc_localnet_code().join("lotus") +pub fn foc_devnet_lotus_repo() -> PathBuf { + foc_devnet_code().join("lotus") } /// Returns the path to the "curio" repository -pub fn foc_localnet_curio_repo() -> PathBuf { - foc_localnet_code().join("curio") +pub fn foc_devnet_curio_repo() -> PathBuf { + foc_devnet_code().join("curio") } /// Returns the path to the "filecoin-services" repository -pub fn foc_localnet_filecoin_services_repo() -> PathBuf { - foc_localnet_code().join("filecoin-services") +pub fn foc_devnet_filecoin_services_repo() -> PathBuf { + foc_devnet_code().join("filecoin-services") } /// Returns the path to the "multicall3" repository -pub fn foc_localnet_multicall3_repo() -> PathBuf { - foc_localnet_code().join("multicall3") +pub fn foc_devnet_multicall3_repo() -> PathBuf { + foc_devnet_code().join("multicall3") } /// Returns the path to the "synapse-sdk" repository -pub fn foc_localnet_synapse_sdk_repo() -> PathBuf { - foc_localnet_code().join("synapse-sdk") +pub fn foc_devnet_synapse_sdk_repo() -> PathBuf { + foc_devnet_code().join("synapse-sdk") } -/// Returns the path to the foc-localnet artifacts directory, e.g., ~/.foc-localnet/artifacts -pub fn foc_localnet_artifacts() -> PathBuf { - foc_localnet_home().join("artifacts") +/// Returns the path to the foc-devnet 
artifacts directory, e.g., ~/.foc-devnet/artifacts +pub fn foc_devnet_artifacts() -> PathBuf { + foc_devnet_home().join("artifacts") } -/// Returns the path where docker volumes are stored, e.g., ~/.foc-localnet/docker/volumes -pub fn foc_localnet_docker_volumes() -> PathBuf { - foc_localnet_home().join("docker").join("volumes") +/// Returns the path where docker volumes are stored, e.g., ~/.foc-devnet/docker/volumes +pub fn foc_devnet_docker_volumes() -> PathBuf { + foc_devnet_home().join("docker").join("volumes") } -/// Returns the path to the cache volumes directory, e.g., ~/.foc-localnet/docker/volumes/cache -pub fn foc_localnet_docker_volumes_cache() -> PathBuf { - foc_localnet_docker_volumes().join("cache") +/// Returns the path to the cache volumes directory, e.g., ~/.foc-devnet/docker/volumes/cache +pub fn foc_devnet_docker_volumes_cache() -> PathBuf { + foc_devnet_docker_volumes().join("cache") } -/// Returns the path to the run-specific volumes directory, e.g., ~/.foc-localnet/docker/volumes/run-specific -pub fn foc_localnet_docker_volumes_run_specific_root() -> PathBuf { - foc_localnet_docker_volumes().join("run-specific") +/// Returns the path to the run-specific volumes directory, e.g., ~/.foc-devnet/docker/volumes/run-specific +pub fn foc_devnet_docker_volumes_run_specific_root() -> PathBuf { + foc_devnet_docker_volumes().join("run-specific") } -/// Returns the path to a specific run's volumes directory, e.g., ~/.foc-localnet/docker/volumes/run-specific/ -pub fn foc_localnet_docker_volumes_run_specific(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific_root().join(run_id) +/// Returns the path to a specific run's volumes directory, e.g., ~/.foc-devnet/docker/volumes/run-specific/ +pub fn foc_devnet_docker_volumes_run_specific(run_id: &str) -> PathBuf { + foc_devnet_docker_volumes_run_specific_root().join(run_id) } -/// Returns the path to the foc-localnet configuration, e.g., ~/.foc-localnet/config.toml -pub fn foc_localnet_config() -> PathBuf { - foc_localnet_home().join("config.toml") +/// Returns the path to the foc-devnet configuration, e.g., ~/.foc-devnet/config.toml +pub fn foc_devnet_config() -> PathBuf { + foc_devnet_home().join("config.toml") } /// Returns the path to the Filecoin proof parameters directory -/// e.g., ~/.foc-localnet/docker/volumes/cache/filecoin-proof-parameters -pub fn foc_localnet_proof_parameters() -> PathBuf { - foc_localnet_docker_volumes_cache().join("filecoin-proof-parameters") +/// e.g., ~/.foc-devnet/docker/volumes/cache/filecoin-proof-parameters +pub fn foc_devnet_proof_parameters() -> PathBuf { + foc_devnet_docker_volumes_cache().join("filecoin-proof-parameters") } /// Returns the path to store BLS keys for lotus -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//lotus-keys -pub fn foc_localnet_lotus_keys(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific(run_id).join("lotus-keys") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//lotus-keys +pub fn foc_devnet_lotus_keys(run_id: &str) -> PathBuf { + foc_devnet_docker_volumes_run_specific(run_id).join("lotus-keys") } /// Returns the path to the pre-sealed sectors for genesis -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//genesis-sectors -pub fn foc_localnet_genesis_sectors(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific(run_id).join("genesis-sectors") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//genesis-sectors +pub fn foc_devnet_genesis_sectors(run_id: &str) -> PathBuf { + 
foc_devnet_docker_volumes_run_specific(run_id).join("genesis-sectors") } /// Returns the path to the pre-sealed sectors for miner 1 (t01000) -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//genesis-sectors/lotus-miner -pub fn foc_localnet_genesis_sectors_lotus_miner(run_id: &str) -> PathBuf { - foc_localnet_genesis_sectors(run_id).join("lotus-miner") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//genesis-sectors/lotus-miner +pub fn foc_devnet_genesis_sectors_lotus_miner(run_id: &str) -> PathBuf { + foc_devnet_genesis_sectors(run_id).join("lotus-miner") } /// Returns the path to the pre-sealed sectors for a PDP SP miner (base-1 indexed) /// /// PDP SP 1 = t01001, PDP SP 2 = t01002, etc. -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//genesis-sectors/pdp-sp-1 -pub fn foc_localnet_genesis_sectors_pdp_sp(run_id: &str, sp_index: usize) -> PathBuf { - foc_localnet_genesis_sectors(run_id).join(format!("pdp-sp-{}", sp_index)) +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//genesis-sectors/pdp-sp-1 +pub fn foc_devnet_genesis_sectors_pdp_sp(run_id: &str, sp_index: usize) -> PathBuf { + foc_devnet_genesis_sectors(run_id).join(format!("pdp-sp-{}", sp_index)) } /// **DEPRECATED:** No longer used. Curio miners are now PDP Service Providers. /// /// This path function remains for backward compatibility during cleanup operations. -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//genesis-sectors/curio-miner +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//genesis-sectors/curio-miner #[allow(dead_code)] -pub fn foc_localnet_genesis_sectors_curio_miner(run_id: &str) -> PathBuf { - foc_localnet_genesis_sectors(run_id).join("curio-miner") +pub fn foc_devnet_genesis_sectors_curio_miner(run_id: &str) -> PathBuf { + foc_devnet_genesis_sectors(run_id).join("curio-miner") } /// Returns the path to the genesis template -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//genesis -pub fn foc_localnet_genesis(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific(run_id).join("genesis") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//genesis +pub fn foc_devnet_genesis(run_id: &str) -> PathBuf { + foc_devnet_docker_volumes_run_specific(run_id).join("genesis") } /// Returns the path to the curio volumes directory -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//curio -pub fn foc_localnet_curio_volumes(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific(run_id).join("curio") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//curio +pub fn foc_devnet_curio_volumes(run_id: &str) -> PathBuf { + foc_devnet_docker_volumes_run_specific(run_id).join("curio") } /// Returns the path to a specific curio SP volume directory (base-1 indexed) -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//curio/1 -pub fn foc_localnet_curio_sp_volume(run_id: &str, sp_index: usize) -> PathBuf { - foc_localnet_curio_volumes(run_id).join(sp_index.to_string()) +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//curio/1 +pub fn foc_devnet_curio_sp_volume(run_id: &str, sp_index: usize) -> PathBuf { + foc_devnet_curio_volumes(run_id).join(sp_index.to_string()) } /// Returns the path to the yugabyte volumes directory -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//yugabyte -pub fn foc_localnet_yugabyte_volumes(run_id: &str) -> PathBuf { - foc_localnet_docker_volumes_run_specific(run_id).join("yugabyte") +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//yugabyte +pub fn foc_devnet_yugabyte_volumes(run_id: &str) -> PathBuf { + 
foc_devnet_docker_volumes_run_specific(run_id).join("yugabyte") } /// Returns the path to a specific yugabyte instance volume directory (base-1 indexed) -/// e.g., ~/.foc-localnet/docker/volumes/run-specific//yugabyte/1 -pub fn foc_localnet_yugabyte_sp_volume(run_id: &str, sp_index: usize) -> PathBuf { - foc_localnet_yugabyte_volumes(run_id).join(sp_index.to_string()) +/// e.g., ~/.foc-devnet/docker/volumes/run-specific//yugabyte/1 +pub fn foc_devnet_yugabyte_sp_volume(run_id: &str, sp_index: usize) -> PathBuf { + foc_devnet_yugabyte_volumes(run_id).join(sp_index.to_string()) } /// Returns the path to the project root directory diff --git a/src/poison.rs b/src/poison.rs index a6f5d32..2685eec 100644 --- a/src/poison.rs +++ b/src/poison.rs @@ -2,11 +2,11 @@ use std::fs; use std::path::PathBuf; use tracing::{info, warn}; -use crate::paths::foc_localnet_state; +use crate::paths::foc_devnet_state; /// Get the path to the poison file fn poison_file_path() -> Result> { - let state_dir = foc_localnet_state(); + let state_dir = foc_devnet_state(); // Ensure state directory exists fs::create_dir_all(&state_dir)?; @@ -56,10 +56,16 @@ fn display_poison_contents(poison_path: &PathBuf) -> Result<(), Box Result<(), Box> { - // TODO: Implement actual recovery logic when more details are available warn!("Recovery logic not yet implemented. Please check system state manually."); warn!("You may need to manually clean up any running containers or inconsistent state."); + warn!("Run 'foc-devnet stop' to clean up containers, then try again."); Ok(()) } diff --git a/src/port_allocator.rs b/src/port_allocator.rs index 4f22ad7..96822fd 100644 --- a/src/port_allocator.rs +++ b/src/port_allocator.rs @@ -1,7 +1,7 @@ //! Port allocation module for managing dynamic port assignment. //! //! This module provides a `PortAllocator` that manages a contiguous range of ports -//! for the localnet cluster. All components (Lotus, Lotus-Miner, Curio, Yugabyte) +//! for the devnet cluster. All components (Lotus, Lotus-Miner, Curio, Yugabyte) //! dynamically allocate ports from this pool, ensuring no conflicts. use std::collections::HashSet; @@ -178,7 +178,7 @@ impl PortAllocator { "The following {} port(s) in the configured range are already in use: {}\n\ Please either:\n\ 1. Stop the processes using these ports, or\n\ - 2. Configure a different port range in ~/.foc-localnet/config.toml", + 2. Configure a different port range in ~/.foc-devnet/config.toml", unavailable_ports.len(), format_port_list(&unavailable_ports) ) diff --git a/src/run_id/mod.rs b/src/run_id/mod.rs index 1c63cc5..6109ffe 100644 --- a/src/run_id/mod.rs +++ b/src/run_id/mod.rs @@ -57,8 +57,8 @@ pub fn generate_run_id() -> String { /// Create a symlink to the latest run directory. pub fn create_latest_symlink(run_id: &str) -> Result<(), Box> { - let latest_link = crate::paths::foc_localnet_state_latest(); - let run_dir = crate::paths::foc_localnet_run_dir(run_id); + let latest_link = crate::paths::foc_devnet_state_latest(); + let run_dir = crate::paths::foc_devnet_run_dir(run_id); // Remove existing symlink if it exists if latest_link.exists() || latest_link.is_symlink() { diff --git a/src/run_id/persistence.rs b/src/run_id/persistence.rs index ae77a82..95e71f0 100644 --- a/src/run_id/persistence.rs +++ b/src/run_id/persistence.rs @@ -1,6 +1,6 @@ //! Run ID persistence for saving and loading the current run ID. //! -//! This module handles saving the current run ID to ~/.foc-localnet/state/current_runid.json +//! 
This module handles saving the current run ID to ~/.foc-devnet/state/current_runid.json //! and loading it when needed for stop/status commands. use serde::{Deserialize, Serialize}; @@ -19,7 +19,7 @@ pub struct RunIdMetadata { /// Get the path to the current run ID file fn current_run_id_file() -> PathBuf { - crate::paths::foc_localnet_state().join("current_runid.json") + crate::paths::foc_devnet_state().join("current_runid.json") } /// Save the current run ID to persistent storage. @@ -35,7 +35,7 @@ fn current_run_id_file() -> PathBuf { /// save_current_run_id("251203-1246-thirsty-wolf")?; /// ``` pub fn save_current_run_id(run_id: &str) -> Result<(), Box> { - let state_dir = crate::paths::foc_localnet_state(); + let state_dir = crate::paths::foc_devnet_state(); fs::create_dir_all(&state_dir)?; let metadata = RunIdMetadata { @@ -65,7 +65,7 @@ pub fn load_current_run_id() -> Result> { if !file_path.exists() { return Err( - "No current run ID found. Start a cluster first with 'foc-localnet start'".into(), + "No current run ID found. Start a cluster first with 'foc-devnet start'".into(), ); } diff --git a/src/utils/mod.rs b/src/utils/mod.rs new file mode 100644 index 0000000..041a794 --- /dev/null +++ b/src/utils/mod.rs @@ -0,0 +1,4 @@ +//! Utility modules + +pub mod retry; +pub mod system_info; diff --git a/src/utils/retry.rs b/src/utils/retry.rs new file mode 100644 index 0000000..4749fd0 --- /dev/null +++ b/src/utils/retry.rs @@ -0,0 +1,103 @@ +//! Retry utilities for validation operations. +//! +//! This module provides retry logic for validation and verification steps. +//! NEVER use these for transactions or deployments - only for read-only checks. + +use std::error::Error; +use std::thread; +use std::time::Duration; +use tracing::warn; + +/// Default maximum number of retry attempts for validation operations +pub const DEFAULT_MAX_RETRIES: u32 = 6; + +/// Default delay between retry attempts in seconds +pub const DEFAULT_RETRY_DELAY_SECS: u64 = 4; + +/// Retry a validation operation with exponential backoff. +/// +/// This function will attempt the provided operation up to `max_retries` times, +/// with a gentle exponential backoff between attempts (delay grows as: base + attempt). +/// +/// # Arguments +/// * `operation` - Closure that performs the validation operation +/// * `max_retries` - Maximum number of retry attempts +/// * `base_delay_secs` - Base delay in seconds (will be added to attempt number) +/// * `operation_name` - Name of the operation for logging +/// +/// # Returns +/// Result from the operation if successful, or the last error if all retries fail +pub fn retry_with_backoff( + mut operation: F, + max_retries: u32, + base_delay_secs: u64, + operation_name: &str, +) -> Result> +where + F: FnMut() -> Result>, +{ + let mut last_error = None; + + for attempt in 1..=max_retries { + match operation() { + Ok(result) => return Ok(result), + Err(e) => { + last_error = Some(e); + if attempt < max_retries { + // Gentle backoff: base + attempt (e.g., 2+1=3, 2+2=4, 2+3=5...) + let delay = base_delay_secs + attempt as u64; + warn!( + "{} failed (attempt {}/{}), retrying in {} seconds...", + operation_name, attempt, max_retries, delay + ); + thread::sleep(Duration::from_secs(delay)); + } + } + } + } + + Err(last_error.unwrap_or_else(|| "Operation failed with no error".into())) +} + +/// Retry a validation operation with fixed delay. +/// +/// This function will attempt the provided operation up to `max_retries` times, +/// with a fixed delay between attempts. 
+/// +/// # Arguments +/// * `operation` - Closure that performs the validation operation +/// * `max_retries` - Maximum number of retry attempts +/// * `delay_secs` - Fixed delay in seconds between retries +/// * `operation_name` - Name of the operation for logging +/// +/// # Returns +/// Result from the operation if successful, or the last error if all retries fail +pub fn retry_with_fixed_delay( + mut operation: F, + max_retries: u32, + delay_secs: u64, + operation_name: &str, +) -> Result> +where + F: FnMut() -> Result>, +{ + let mut last_error = None; + + for attempt in 1..=max_retries { + match operation() { + Ok(result) => return Ok(result), + Err(e) => { + last_error = Some(e); + if attempt < max_retries { + warn!( + "{} failed (attempt {}/{}), retrying in {} seconds...", + operation_name, attempt, max_retries, delay_secs + ); + thread::sleep(Duration::from_secs(delay_secs)); + } + } + } + } + + Err(last_error.unwrap_or_else(|| "Operation failed with no error".into())) +} diff --git a/src/utils/system_info.rs b/src/utils/system_info.rs new file mode 100644 index 0000000..d0b9005 --- /dev/null +++ b/src/utils/system_info.rs @@ -0,0 +1,190 @@ +//! System information utilities. +//! +//! This module provides functions to gather and display system information +//! such as CPU details, core count, and available memory. + +use tracing::info; + +/// Log system information including CPU, cores, threads, and memory. +pub fn log_system_info() { + info!("=== System Information ==="); + + // CPU information + if let Some(cpu_info) = get_cpu_info() { + info!("CPU: {}", cpu_info); + } + + // Core count + let num_cores = num_cpus::get_physical(); + info!("Physical CPU cores: {}", num_cores); + + // Thread count + let num_threads = num_cpus::get(); + info!("Logical CPU threads: {}", num_threads); + + // Memory information + if let Some(total_memory) = get_total_memory() { + info!("Total RAM: {}", format_bytes(total_memory)); + + if let Some(available_memory) = get_available_memory() { + info!("Available RAM: {}", format_bytes(available_memory)); + } + } + + info!("=========================="); +} + +/// Get CPU model information from /proc/cpuinfo (Linux only). +#[cfg(target_os = "linux")] +fn get_cpu_info() -> Option { + use std::fs; + + let contents = fs::read_to_string("/proc/cpuinfo").ok()?; + + for line in contents.lines() { + if line.starts_with("model name") { + if let Some(cpu_name) = line.split(':').nth(1) { + return Some(cpu_name.trim().to_string()); + } + } + } + + None +} + +/// Get CPU model information (macOS). +#[cfg(target_os = "macos")] +fn get_cpu_info() -> Option { + use std::process::Command; + + let output = Command::new("sysctl") + .arg("-n") + .arg("machdep.cpu.brand_string") + .output() + .ok()?; + + String::from_utf8(output.stdout) + .ok() + .map(|s| s.trim().to_string()) +} + +/// Get CPU model information (other platforms). +#[cfg(not(any(target_os = "linux", target_os = "macos")))] +fn get_cpu_info() -> Option { + None +} + +/// Get total system memory in bytes. +#[cfg(target_os = "linux")] +fn get_total_memory() -> Option { + use std::fs; + + let contents = fs::read_to_string("/proc/meminfo").ok()?; + + for line in contents.lines() { + if line.starts_with("MemTotal:") { + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() >= 2 { + let kb = parts[1].parse::().ok()?; + return Some(kb * 1024); // Convert KB to bytes + } + } + } + + None +} + +/// Get available system memory in bytes. 
+#[cfg(target_os = "linux")] +fn get_available_memory() -> Option { + use std::fs; + + let contents = fs::read_to_string("/proc/meminfo").ok()?; + + for line in contents.lines() { + if line.starts_with("MemAvailable:") { + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() >= 2 { + let kb = parts[1].parse::().ok()?; + return Some(kb * 1024); // Convert KB to bytes + } + } + } + + None +} + +/// Get total system memory (macOS). +#[cfg(target_os = "macos")] +fn get_total_memory() -> Option { + use std::process::Command; + + let output = Command::new("sysctl") + .arg("-n") + .arg("hw.memsize") + .output() + .ok()?; + + String::from_utf8(output.stdout) + .ok() + .and_then(|s| s.trim().parse::().ok()) +} + +/// Get available system memory (macOS). +#[cfg(target_os = "macos")] +fn get_available_memory() -> Option { + // On macOS, we can use vm_stat to get free memory + // This is an approximation as macOS memory management is complex + use std::process::Command; + + let output = Command::new("vm_stat").output().ok()?; + let output_str = String::from_utf8(output.stdout).ok()?; + + // Parse page size and free pages + let mut page_size = 4096u64; // Default page size + let mut free_pages = 0u64; + + for line in output_str.lines() { + if line.contains("page size of") { + if let Some(size_str) = line.split("page size of").nth(1) { + if let Some(size) = size_str.split_whitespace().next() { + page_size = size.parse().unwrap_or(4096); + } + } + } else if line.starts_with("Pages free:") { + if let Some(pages) = line.split(':').nth(1) { + free_pages = pages.trim().trim_end_matches('.').parse().unwrap_or(0); + } + } + } + + Some(free_pages * page_size) +} + +/// Get total/available memory (other platforms). +#[cfg(not(any(target_os = "linux", target_os = "macos")))] +fn get_total_memory() -> Option { + None +} + +#[cfg(not(any(target_os = "linux", target_os = "macos")))] +fn get_available_memory() -> Option { + None +} + +/// Format bytes into a human-readable string. +fn format_bytes(bytes: u64) -> String { + const KB: u64 = 1024; + const MB: u64 = KB * 1024; + const GB: u64 = MB * 1024; + + if bytes >= GB { + format!("{:.2} GB", bytes as f64 / GB as f64) + } else if bytes >= MB { + format!("{:.2} MB", bytes as f64 / MB as f64) + } else if bytes >= KB { + format!("{:.2} KB", bytes as f64 / KB as f64) + } else { + format!("{} bytes", bytes) + } +} diff --git a/src/version_info.rs b/src/version_info.rs index 6f5a111..d127a52 100644 --- a/src/version_info.rs +++ b/src/version_info.rs @@ -37,7 +37,7 @@ impl VersionInfo { /// Write version information to a version.txt file. 
/// /// Creates a version.txt file in the specified directory containing: -/// - foc-localnet version +/// - foc-devnet version /// - Git commit hash (with dirty indicator if uncommitted changes exist) /// - Git branch /// - Build timestamps @@ -66,7 +66,7 @@ pub fn write_version_file(dir: &Path, version_info: &VersionInfo) -> Result<(), }; let content = format!( - "foc-localnet {}\n\ + "foc-devnet {}\n\ Commit: {}{}\n\ Branch: {}\n\ Built (UTC): {} {}\n\ diff --git a/tests/build_curio_test.rs b/tests/build_curio_test.rs deleted file mode 100644 index 3e12cdb..0000000 --- a/tests/build_curio_test.rs +++ /dev/null @@ -1,227 +0,0 @@ -use std::fs; -use std::process::Command; -use tempfile; - -/// Test that the Curio build command help works -#[test] -fn test_curio_build_help() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Test that the build command help works - let help_output = Command::new("./target/debug/foc-localnet") - .args(["build", "curio", "--help"]) - .output() - .expect("Failed to run build curio help"); - - assert!( - help_output.status.success(), - "Build curio help command failed" - ); - let help_text = String::from_utf8_lossy(&help_output.stdout); - assert!( - help_text.contains("Curio"), - "Help text doesn't mention Curio" - ); - assert!( - help_text.contains("Path to the Curio source directory"), - "Help text doesn't mention path option" - ); -} - -/// Test that the Curio build command handles invalid paths correctly -#[test] -fn test_curio_build_invalid_path() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Test that invalid path fails gracefully - let invalid_output = Command::new("./target/debug/foc-localnet") - .args(["build", "curio", "/nonexistent/path"]) - .output() - .expect("Failed to run build with invalid path"); - - assert!( - !invalid_output.status.success(), - "Expected command to fail with nonexistent path" - ); - let error_text = String::from_utf8_lossy(&invalid_output.stderr); - assert!( - error_text.contains("does not exist"), - "Error message should mention path doesn't exist" - ); -} - -/// Test that the Curio build command accepts valid paths and starts the build process -#[test] -fn test_curio_build_valid_path() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Create a temporary directory and clone a shallow copy of Curio - let temp_dir = tempfile::tempdir().expect("Failed to create temp directory"); - let curio_dir = temp_dir.path().join("curio"); - - // Clone a shallow copy of Curio for testing - let clone_status = Command::new("git") - .args([ - "clone", - "--depth", - "1", - "https://github.com/filecoin-project/curio.git", - ]) - .arg(curio_dir.to_str().unwrap()) - .status() - .expect("Failed to clone Curio repository"); - - assert!(clone_status.success(), "Failed to clone Curio repository"); - - // Create output directory - let output_dir = temp_dir.path().join("output"); - std::fs::create_dir(&output_dir).expect("Failed to create output directory"); - - // Test that the build command accepts the valid path and starts (but may 
timeout) - let build_command = Command::new("./target/debug/foc-localnet") - .args(["build", "curio"]) - .arg(curio_dir.to_str().unwrap()) - .args(["--output-dir", output_dir.to_str().unwrap()]) - .stdout(std::process::Stdio::piped()) - .stderr(std::process::Stdio::piped()) - .spawn(); - - match build_command { - Ok(mut child) => { - // Let it run for a reasonable time to allow the build to complete - std::thread::sleep(std::time::Duration::from_secs(30)); - - // Check if the process completed successfully - match child.try_wait() { - Ok(Some(status)) => { - if status.success() { - println!("{}", "Build process completed successfully"); - } else { - println!("Build process completed with status: {}", status); - } - } - Ok(None) => { - println!("{}", "Build process is still running, killing it"); - // Kill the process since we don't want it to run indefinitely - let _ = child.kill(); - } - Err(e) => { - panic!("Failed to check build process status: {}", e); - } - } - } - Err(e) => { - panic!("Failed to start build command with valid path: {}", e); - } - } - - // Check that the expected binary was created - let curio_binary = output_dir.join("curio"); - - // List contents of output directory for debugging - println!("{}", "Contents of output directory:"); - if let Ok(entries) = fs::read_dir(&output_dir) { - for entry in entries { - if let Ok(entry) = entry { - println!("{}", entry.path().display()); - } - } - } else { - println!("{}", "Could not read output directory"); - } - - // The build should create the expected binary - assert!( - curio_binary.exists(), - "Curio binary should be created in output directory" - ); - - // Verify it's executable - let curio_metadata = curio_binary - .metadata() - .expect("Failed to get curio binary metadata"); - assert!( - !curio_metadata.permissions().readonly(), - "Curio binary should be executable" - ); - - println!( - "{}", - "Curio binary was created successfully and is executable" - ); - - // At minimum, verify that the foc-localnet-builder Docker image was created - // (this happens during the build process) - let images_output = Command::new("docker") - .args([ - "images", - "foc-localnet-builder", - "--format", - "{{.Repository}}:{{.Tag}}", - ]) - .output() - .expect("Failed to check Docker images"); - - let images_text = String::from_utf8_lossy(&images_output.stdout); - // The image should exist since it's created early in the build process - assert!( - images_text.contains("foc-localnet-builder:latest"), - "Docker builder image should have been created during build process" - ); - println!("Docker images check: {}", images_text); -} - -/// Test Docker image building for Curio builds -#[test] -fn test_docker_image_building() { - // Test Docker image building (this is the core functionality we want to test) - let build_status = Command::new("docker") - .args(["build", "-t", "foc-localnet-builder-test", "./docker"]) - .status() - .expect("Failed to build Docker image"); - - assert!( - build_status.success(), - "Failed to build Docker builder image" - ); - - // Verify the image was created - let images_output = Command::new("docker") - .args([ - "images", - "foc-localnet-builder-test", - "--format", - "{{.Repository}}:{{.Tag}}", - ]) - .output() - .expect("Failed to check Docker images"); - - let images_text = String::from_utf8_lossy(&images_output.stdout); - assert!( - images_text.contains("foc-localnet-builder-test:latest"), - "Docker builder image was not created properly" - ); - - // Clean up: remove the test Docker image - let _ = 
Command::new("docker") - .args(["rmi", "foc-localnet-builder-test:latest"]) - .status(); -} diff --git a/tests/build_lotus_test.rs b/tests/build_lotus_test.rs deleted file mode 100644 index 8b4e76e..0000000 --- a/tests/build_lotus_test.rs +++ /dev/null @@ -1,240 +0,0 @@ -use std::fs; -use std::process::Command; -use tempfile; - -/// Test that the Lotus build command help works -#[test] -fn test_lotus_build_help() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Test that the build command help works - let help_output = Command::new("./target/debug/foc-localnet") - .args(["build", "lotus", "--help"]) - .output() - .expect("Failed to run build lotus help"); - - assert!( - help_output.status.success(), - "Build lotus help command failed" - ); - let help_text = String::from_utf8_lossy(&help_output.stdout); - assert!( - help_text.contains("Lotus"), - "Help text doesn't mention Lotus" - ); - assert!( - help_text.contains("Path to the Lotus source directory"), - "Help text doesn't mention path option" - ); -} - -/// Test that the Lotus build command handles invalid paths correctly -#[test] -fn test_lotus_build_invalid_path() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Test that invalid path fails gracefully - let invalid_output = Command::new("./target/debug/foc-localnet") - .args(["build", "lotus", "/nonexistent/path"]) - .output() - .expect("Failed to run build with invalid path"); - - assert!( - !invalid_output.status.success(), - "Expected command to fail with nonexistent path" - ); - let error_text = String::from_utf8_lossy(&invalid_output.stderr); - assert!( - error_text.contains("does not exist"), - "Error message should mention path doesn't exist" - ); -} - -/// Test that the Lotus build command accepts valid paths and starts the build process -#[test] -fn test_lotus_build_valid_path() { - // Build the foc-localnet binary - let status = Command::new("cargo") - .args(["build"]) - .status() - .expect("Failed to build foc-localnet"); - - assert!(status.success(), "Failed to build foc-localnet binary"); - - // Create a temporary directory and clone a shallow copy of Lotus - let temp_dir = tempfile::tempdir().expect("Failed to create temp directory"); - let lotus_dir = temp_dir.path().join("lotus"); - - // Clone a shallow copy of Lotus for testing - let clone_status = Command::new("git") - .args([ - "clone", - "--depth", - "1", - "https://github.com/filecoin-project/lotus.git", - ]) - .arg(lotus_dir.to_str().unwrap()) - .status() - .expect("Failed to clone Lotus repository"); - - assert!(clone_status.success(), "Failed to clone Lotus repository"); - - // Create output directory - let output_dir = temp_dir.path().join("output"); - std::fs::create_dir(&output_dir).expect("Failed to create output directory"); - - // Test that the build command accepts the valid path and starts (but may timeout) - let build_command = Command::new("./target/debug/foc-localnet") - .args(["build", "lotus"]) - .arg(lotus_dir.to_str().unwrap()) - .args(["--output-dir", output_dir.to_str().unwrap()]) - .stdout(std::process::Stdio::piped()) - .stderr(std::process::Stdio::piped()) - .spawn(); - - match build_command { - Ok(mut child) => { - // Let it run for a reasonable 
time to allow the build to complete - std::thread::sleep(std::time::Duration::from_secs(30)); - - // Check if the process completed successfully - match child.try_wait() { - Ok(Some(status)) => { - if status.success() { - println!("{}", "Build process completed successfully"); - } else { - println!("Build process completed with status: {}", status); - } - } - Ok(None) => { - println!("{}", "Build process is still running, killing it"); - // Kill the process since we don't want it to run indefinitely - let _ = child.kill(); - } - Err(e) => { - panic!("Failed to check build process status: {}", e); - } - } - } - Err(e) => { - panic!("Failed to start build command with valid path: {}", e); - } - } - - // Check that the expected binaries were created - let lotus_binary = output_dir.join("lotus"); - let lotus_miner_binary = output_dir.join("lotus-miner"); - - // List contents of output directory for debugging - println!("{}", "Contents of output directory:"); - if let Ok(entries) = fs::read_dir(&output_dir) { - for entry in entries { - if let Ok(entry) = entry { - println!("{}", entry.path().display()); - } - } - } else { - println!("{}", "Could not read output directory"); - } - - // The build should create the expected binaries - assert!( - lotus_binary.exists(), - "Lotus binary should be created in output directory" - ); - assert!( - lotus_miner_binary.exists(), - "Lotus-miner binary should be created in output directory" - ); - - // Verify they are executable - let lotus_metadata = lotus_binary - .metadata() - .expect("Failed to get lotus binary metadata"); - assert!( - !lotus_metadata.permissions().readonly(), - "Lotus binary should be executable" - ); - - let lotus_miner_metadata = lotus_miner_binary - .metadata() - .expect("Failed to get lotus-miner binary metadata"); - assert!( - !lotus_miner_metadata.permissions().readonly(), - "Lotus-miner binary should be executable" - ); - - println!( - "{}", - "Lotus and lotus-miner binaries were created successfully and are executable" - ); - - // At minimum, verify that the foc-localnet-builder Docker image was created - // (this happens during the build process) - let images_output = Command::new("docker") - .args([ - "images", - "foc-localnet-builder", - "--format", - "{{.Repository}}:{{.Tag}}", - ]) - .output() - .expect("Failed to check Docker images"); - - let images_text = String::from_utf8_lossy(&images_output.stdout); - // The image should exist since it's created early in the build process - assert!( - images_text.contains("foc-localnet-builder:latest"), - "Docker builder image should have been created during build process" - ); - println!("Docker images check: {}", images_text); -} - -/// Test Docker image building for Lotus builds -#[test] -fn test_docker_image_building() { - // Test Docker image building (this is the core functionality we want to test) - let build_status = Command::new("docker") - .args(["build", "-t", "foc-localnet-builder-test", "./docker"]) - .status() - .expect("Failed to build Docker image"); - - assert!( - build_status.success(), - "Failed to build Docker builder image" - ); - - // Verify the image was created - let images_output = Command::new("docker") - .args([ - "images", - "foc-localnet-builder-test", - "--format", - "{{.Repository}}:{{.Tag}}", - ]) - .output() - .expect("Failed to check Docker images"); - - let images_text = String::from_utf8_lossy(&images_output.stdout); - assert!( - images_text.contains("foc-localnet-builder-test:latest"), - "Docker builder image was not created properly" - ); - - // 
Clean up: remove the test Docker image - let _ = Command::new("docker") - .args(["rmi", "foc-localnet-builder-test:latest"]) - .status(); -} diff --git a/working.md b/working.md deleted file mode 100644 index 8c098c7..0000000 --- a/working.md +++ /dev/null @@ -1,277 +0,0 @@ -# Setup env vars -```bash -export FOC_CONTRACT_USDFC=0xB514FeE11119E0923950C09A181F1fa3aa62C80b ; -export FOC_CONTRACT_FWSS=0x4A8a81765bFBe09D6fDd167EF954a1D3401340e5 ; -export FOC_CONTRACT_MULTICALL=0x2e1F1424b41ad7b2E34b0a60501edFc82FEf5BE8 ; -export FOC_CONTRACT_SIMPLE=0x0000000000000000000000000000000000000000 ; -export FOC_CONTRACT_PAY=0xFD61fA68CB8F70dfC35a4AB244703e39BaB9F352 ; -export CURIO_DB_HOST=localhost; -export CURIO_DB_PORT=5703 ; -export CURIO_DB_CASSANDRA_PORT=5704; -export CURIO_DB_USER=yugabyte ; -export CURIO_DB_PASSWORD=yugabyte ; -export CURIO_DB_NAME=yugabyte ; -export CURIO_DB_LOAD_BALANCE=false; -export FULLNODE_API_INFO=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.-A_dOiryIy0L91-CkYi8vedSLEAfiKuPhN-21ijJX_I:/dns4/localhost/tcp/5700/http -export LOTUS_PATH=/home/redpanda/.foc-localnet/artifacts/docker/volumes/lotus-data; -export PDP_PRIVATE_KEY=cd3ec679d4c6928ff7db1854ee1720ad0e0f8c03299c093602360c431452705c; -``` - -# First run, setting up new-cluster -curio config new-cluster t01001 - -(finishes by itself) - -# Setup PDP layer - -curio config create --title pdp-only << 'EOF' -[HTTP] -DelegateTLS = true -DomainName = "pdp-sp-0.foc-localnet.internal" -Enable = true -ListenAddress = "0.0.0.0:4702" - -[Subsystems] -EnableCommP = true -EnableMoveStorage = true -EnablePDP = true -EnableParkPiece = true -EOF - -Layer pdp-only created/updated - -# Attach storage - -curio run --nosync --layers seal,post,pdp-only,gui - -Wait till localhost:4701 is available - -curio cli storage attach --init --seal /home/redpanda/.foc-localnet/artifacts/docker/volumes/curio/fast-storage -curio cli storage attach --init --store /home/redpanda/.foc-localnet/artifacts/docker/volumes/curio/long-term-storage - - -# PDPTool Magic - -┌─────────────────────────────────────────────────────────────────┐ -│ CLIENT SIDE (pdptool) │ -└─────────────────────────────────────────────────────────────────┘ - -1. pdptool create-service-secret - ├─ Generates ECDSA P-256 key pair - ├─ Saves PRIVATE key → pdpservice.json (keep secret!) - └─ Outputs PUBLIC key → Register with Curio server - -2. pdptool create-jwt-token "pdp" - ├─ Loads private key from pdpservice.json - ├─ Creates JWT claims: - │ { - │ "service_name": "pdp", - │ "exp": 1734480000 // 24 hours from now - │ } - ├─ Signs with ES256 algorithm using PRIVATE key - └─ Outputs JWT token: "eyJhbGciOiJFUzI1NiIs..." - -3. Send HTTP request with JWT: - Authorization: Bearer eyJhbGciOiJFUzI1NiIs... 
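
A minimal sketch of what step 3 looks like on the wire, assuming the token printed by `pdptool create-jwt-token pdp` and the `/pdp/ping` route on the PDP listen port (4702) configured above; `pdptool ping` further down performs the same check for you:

```bash
# Strip the "JWT Token:" label line so only the raw token remains
JWT=$(pdptool create-jwt-token pdp | grep -v "JWT Token:")

# Send the token as a bearer credential to the PDP endpoint; the service is
# expected to verify the ES256 signature against the public key registered
# via CurioWeb.AddPDPService (see below)
curl -i -H "Authorization: Bearer ${JWT}" http://localhost:4702/pdp/ping
```
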
-
-
-pdptool create-service-secret > pdp_service_pubkey.txt
-Saves:
-  - Pubkey in pdp_service_pubkey.txt
-  - PrivKey in PDPService.json
-
-
-Give the on-chain signing key to Curio (the PDP_SP_0 key):
-```bash
-curl -X POST -H "Content-Type: application/json" \
-  -d "{\"jsonrpc\":\"2.0\",\"method\":\"CurioWeb.ImportPDPKey\",\"params\":[\"cd3ec679d4c6928ff7db1854ee1720ad0e0f8c03299c093602360c431452705c\"],\"id\":1}" \
-  http://localhost:4701/api/webrpc/v0
-```
-The result is the public key / address of PDP_SP_0; verify it. The response looks like:
-`{"id":1,"jsonrpc":"2.0","result":"0x1988D2De200fD1aEC55931376e74073d90f64DAC"}`
-
-Take the public key output by pdptool and send it over to Curio; the payload looks like this:
-```json
-{
-  "jsonrpc": "2.0",
-  "method": "CurioWeb.AddPDPService",
-  "params": [
-    "pdp",
-    "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGSPt+D+keBx6vPVUEoPrzFtwYf2+\nBEUcKynWk0u9iOfiV8OO5vkIECGEfDWKh7kmPcndcff6p44OWP0Z+j7Jpw==\n-----END PUBLIC KEY-----"
-  ],
-  "id": 2
-}
-```
-
-So, sending it looks like:
-```bash
-echo "Creating service tokens..."
-pdptool create-service-secret > pdp_service_key.txt
-
-echo "Creating JWT token..."
-pdptool create-jwt-token pdp | grep -v "JWT Token:" > jwt_token.txt
-
-# Extract public key from the output and properly format it
-PUB_KEY=$(cat pdp_service_key.txt | sed -n '/Public Key:/,/-----END PUBLIC KEY-----/p' | grep -v "Public Key:" | sed 's/^[[:space:]]*//')
-echo "Public Key (formatted):"
-echo "$PUB_KEY"

-JSON_PUB_KEY=$(echo "$PUB_KEY" | awk '{printf "%s\\n", $0}' | sed 's/\\n$//')
-curl -X POST -H "Content-Type: application/json" \
-  -d "{\"jsonrpc\":\"2.0\",\"method\":\"CurioWeb.AddPDPService\",\"params\":[\"pdp\",\"$JSON_PUB_KEY\"],\"id\":2}" \
-  http://localhost:4701/api/webrpc/v0
-```
-The output should be: {"id":2,"jsonrpc":"2.0","result":null}
-
-Ensure connectivity:
-```
-# Test connectivity to the PDP service endpoint
-echo "Testing PDP connectivity..."
-pdptool ping --service-url http://localhost:4702 --service-name pdp
-```
-It should return "Ping successful: Service is reachable and JWT token is valid."
-
-
-
----------------------
-We need to re-implement CurioStep to set up the Curio SP properly, along with other steps.
-
-Create a temporary task tracker `upgrades.md` to track progress between AI runs.
-After each milestone, add a git commit.
-
-- First, we need to accommodate the fact that there will be multiple Curio processes down the line.
-  - As such, those nodes will be numbered 0, 1, 2, 3, etc.
-  - For example, `PDP_SP_0` is currently the sole PDP service provider address. This will be extended later, and made base 1.
-  - Actionable: Create as many pre-sealed sector sets (currently we have only 2: one for lotus-miner, one for Curio) as there are PDP service providers. Net, there will be one set for lotus-miner and N sets for the PDP service providers.
-  - Actionable: Create up to 5 (add constant: MAX_PDP_SP_COUNT) PDP SP addresses during the genesis / key generation process. They will be PDP_SP_1 through PDP_SP_5 (base 1, not base 0). They may or may not be used in CurioStep, but their keys are generated (see the naming sketch below).
-  - Actionable: Spawn as many `foc-yugabyte` containers as there are PDP service providers, suffixed with -1, -2, etc. Each runs on its own yugabyte network (again, base 1).
-  - Every `cargo run -- start --reset` should delete the `~/.foc-localnet/artifacts/docker/volumes/curio/*` volumes.
-
-CurioStep needs to be overhauled: it does too little, and its post-execute verifications are not thorough.
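A minimal Rust sketch of the base-1 naming rules listed above (the constant name `MAX_PDP_SP_COUNT` comes from the list; the helper functions are hypothetical, for illustration only):

```rust
/// Maximum number of PDP storage providers whose keys are generated at genesis.
/// Constant name taken from the list above; the helpers below are illustrative only.
const MAX_PDP_SP_COUNT: usize = 5;

/// Address names are base-1: PDP_SP_1 through PDP_SP_5.
fn pdp_sp_key_name(index: usize) -> String {
    assert!((1..=MAX_PDP_SP_COUNT).contains(&index), "PDP SP indices are 1-based");
    format!("PDP_SP_{index}")
}

/// Each PDP SP gets its own YugabyteDB container and network, also base-1 suffixed.
fn yugabyte_container_name(index: usize) -> String {
    format!("foc-yugabyte-{index}")
}

fn main() {
    for i in 1..=MAX_PDP_SP_COUNT {
        println!("{} -> {}", pdp_sp_key_name(i), yugabyte_container_name(i));
    }
}
```

Keeping keys, containers, and networks on the same base-1 index makes it easy to line up PDP_SP_N with foc-yugabyte-N.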
-New CurioStep will:
-- be a multi-file module, with each file no longer than 250 lines of code
-- may spawn multiple Curio nodes; currently only 1 is spawned, but the count should be configurable via config.toml in ~/.foc-localnet
-- Each Curio node:
-  - Pre-execute: verifies that Lotus is running and blocks are being produced, similar to lotus-miner.
-  - Execute: a complex multi-step process for each Curio SP node, documented below.
-  - Post-execute: verifies that we can ping the PDP endpoint and that piece upload and download work via pdptool (no JWT tokens).
-
-
-## curio execute
-
-- Each Curio SP (with identifier X) should depend on the volumes under `~/.foc-localnet/artifacts/docker/volumes/curio/X/`.
-- Curio needs to run first inside `foc-curio` for the base DB migration (DB migration on the base layer). Each Curio SP has its own `foc-yugabyte` instance.
-  - curio new-cluster "t01001" etc.
-  - Sets up the Curio "base" layer.
-  - This is done via `curio config new-cluster`, for example: `curio config new-cluster t01001`
-  - This does not start a daemon; it just sets up the DB and exits when that is done.
-- Provide Curio with the 'pdp-layer' config. It can be done via:
-  ```
-  curio config create --title pdp-only << 'EOF'
-  [HTTP]
-  DelegateTLS = true
-  DomainName = "pdp-sp-0.foc-localnet.internal"
-  Enable = true
-  ListenAddress = "0.0.0.0:4702"
-
-  [Subsystems]
-  EnableCommP = true
-  EnableMoveStorage = true
-  EnablePDP = true
-  EnableParkPiece = true
-  EOF
-  ```
-  - Sets up the 'pdp-layer'.
-  - We don't care about the domain name; anything can be put there.
-  - DelegateTLS should always be true.
-  - This does not start a daemon; it just sets up the DB and exits when that is done.
-- Start the Curio daemon with `curio run --nosync --layers seal,post,pdp-only,gui`
-  - This spawns the Curio daemon. However, Curio does not yet know how to deal with storage.
-  - Tell Curio which "storage" locations to use. You can execute the following commands to set up:
-    - "fast-storage"
-    - "long-term-storage"
-    - Commands:
-    ```sh
-    curio cli --machine 127.0.0.1:12300 storage attach \
-      --init \
-      --seal \
-      --weight 10 \
-      /home/redpanda/.foc-localnet/artifacts/docker/volumes/curio/X/fast-storage
-    curio cli --machine 127.0.0.1:12300 storage attach \
-      --init \
-      --store \
-      --weight 10 \
-      /home/redpanda/.foc-localnet/artifacts/docker/volumes/curio/X/long-term-storage
-    ```
-  - However, this will not be used verbatim, because we will be mounting the host volumes into Docker.
-  - Most probably these will be mounted at `/home/foc-user/curio/fast-storage` and `/home/foc-user/curio/long-term-storage`.
-  - Tell Curio about the PDP private key it needs to use for on-chain communication:
-    ```bash
-    curl -X POST -H "Content-Type: application/json" \
-      -d "{\"jsonrpc\":\"2.0\",\"method\":\"CurioWeb.ImportPDPKey\",\"params\":[\"\"],\"id\":1}" \
-      http://localhost:4701/api/webrpc/v0
-    ```
-  - This returns output of the format: `{"id":1,"jsonrpc":"2.0","result":"0x1988D2De200fD1aEC55931376e74073d90f64DAC"}`
-  - The result is the public key / address of pdp-sp-X; verify that it matches what is listed in addresses.json.
-
-
-## curio post-execute
-
-1. Check that we can ping the PDP service (probably via reqwest), i.e. that the PDP subsystem is working (handlers are running):
-   curl -X GET http://localhost:4702/pdp/ping
-
-   Also check that the local Curio storage roughly works (pull in pdptool upload-piece).
-
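A sketch of how that ping check could look in Rust with a blocking `reqwest` client (the URL follows the curl line above; treating any non-2xx response as a failure is an assumption):

```rust
/// Post-execute check: the PDP handlers answer GET /pdp/ping.
/// Sketch only; the endpoint follows the curl command above.
fn check_pdp_ping(port: u16) -> Result<(), Box<dyn std::error::Error>> {
    let url = format!("http://localhost:{port}/pdp/ping");
    let resp = reqwest::blocking::get(&url)?;
    if !resp.status().is_success() {
        return Err(format!("PDP ping at {url} returned {}", resp.status()).into());
    }
    Ok(())
}
```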
-2. Check that storage, upload, and download work
-  - First create a file with 1 KB of random data.
-  - Upload it to Curio via pdptool:
-    ```sh
-    pdptool upload-piece \
-      --service-url http://localhost:4702 \
-      --service-name public \
-      --hash-type commp \
-      README.md \
-      --verbose
-    ```
-  - This does not output anything, but running the same command again returns the Piece CID.
-  - The output is of the format:
-    ```sh
-    http.StatusOK
-    Piece already exists on the server. Piece CID: bafkzcibdyeoaqt65bbxel7udfzz676ar44327o527ugh2seukeme7anlcpi23rqq
-    Piece uploaded successfully.
-    ```
-  - Use this Piece CID (`bafkzcibdyeoaqt65bbxel7udfzz676ar44327o527ugh2seukeme7anlcpi23rqq`) to download the file (via reqwest):
-  - curl -X GET http://localhost:4702/piece/bafkzcibdyeoaqt65bbxel7udfzz676ar44327o527ugh2seukeme7anlcpi23rqq
-  - Verify that the retrieved data matches what was uploaded.
-
-
-Ensure these env vars are set (across all calls to foc-curio):
-```
-# Derive from contract_addresses.json
-export FOC_CONTRACT_USDFC=0xB514FeE11119E0923950C09A181F1fa3aa62C80b ;
-export FOC_CONTRACT_FWSS=0x4A8a81765bFBe09D6fDd167EF954a1D3401340e5 ;
-export FOC_CONTRACT_MULTICALL=0x2e1F1424b41ad7b2E34b0a60501edFc82FEf5BE8 ;
-export FOC_CONTRACT_SIMPLE=0x0000000000000000000000000000000000000000 ;
-export FOC_CONTRACT_PAY=0xFD61fA68CB8F70dfC35a4AB244703e39BaB9F352 ;
-
-export CURIO_DB_HOST=localhost; # yugabyte DNS name
-export CURIO_DB_PORT=5703 ; # yugabyte dynamic port
-export CURIO_DB_CASSANDRA_PORT=5704; # yugabyte dynamic port
-export CURIO_DB_USER=yugabyte ;
-export CURIO_DB_PASSWORD=yugabyte ;
-export CURIO_DB_NAME=yugabyte ;
-export CURIO_DB_LOAD_BALANCE=false;
-
-# same as what is fed to lotus-miner
-export FULLNODE_API_INFO=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.-A_dOiryIy0L91-CkYi8vedSLEAfiKuPhN-21ijJX_I:/dns4/localhost/tcp/5700/http
-
-# Private key from addresses.json
-export PDP_PRIVATE_KEY=cd3ec679d4c6928ff7db1854ee1720ad0e0f8c03299c093602360c431452705c;
-```
-
-
-curl -X GET http://172.23.0.3:5713/piece/bafkzcibd6adqne2c5wxovp3dz6hqxzgke7amhn3eflgdxzcyycrdso6seoghkorp
\ No newline at end of file
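To round this out, a sketch of the storage round-trip check from step 2 in Rust, assuming `pdptool` is on the PATH and blocking `reqwest` is available; the flags follow the upload example above, and parsing the Piece CID out of a second upload run relies on the output format shown there:

```rust
use std::fs;
use std::process::Command;

/// Upload ~1 KB of test data via pdptool, fetch it back over /piece/<cid>, and compare.
/// Sketch only; command flags and output parsing follow the working notes above.
fn check_piece_round_trip(service_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    // 1 KB of arbitrary (deterministic) test bytes.
    let data: Vec<u8> = (0..1024u32).map(|i| (i * 31 % 251) as u8).collect();
    let path = std::env::temp_dir().join("foc-pdp-roundtrip.bin");
    fs::write(&path, &data)?;

    // Run the upload twice: the second run reports "Piece CID: ..." (see the output format above).
    let run_upload = || {
        Command::new("pdptool")
            .args([
                "upload-piece",
                "--service-url", service_url,
                "--service-name", "public",
                "--hash-type", "commp",
            ])
            .arg(&path)
            .output()
    };
    run_upload()?;
    let second = run_upload()?;
    let stdout = String::from_utf8_lossy(&second.stdout);
    let cid = stdout
        .lines()
        .find_map(|line| line.split("Piece CID:").nth(1))
        .map(str::trim)
        .ok_or("could not find a Piece CID in pdptool output")?
        .to_string();

    // Download via the /piece/<cid> endpoint and compare with what was uploaded.
    let body = reqwest::blocking::get(format!("{service_url}/piece/{cid}"))?.bytes()?;
    if body.as_ref() != data.as_slice() {
        return Err("downloaded piece does not match the uploaded data".into());
    }
    Ok(())
}
```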