perf: optimize ProviderScope with zero-cost debug validation #18
Workflow file for this run
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# CI workflow: runs the disco provider benchmarks in debug mode, renders the
# timings as a markdown table, uploads it as an artifact, and posts/updates a
# comment on pull requests.
name: Provider Benchmarks

on:
  pull_request:
    paths:
      - 'packages/disco/lib/**'
      - 'packages/disco/benchmark/**'
      - '.github/workflows/benchmark.yaml'
  push:
    branches:
      - main
      - dev
    paths:
      - 'packages/disco/lib/**'
      - 'packages/disco/benchmark/**'
  workflow_dispatch:

jobs:
  benchmark:
    name: Run Provider Benchmarks
    runs-on: ubuntu-latest
    permissions:
      contents: read       # checkout only
      pull-requests: write # needed to create/update the benchmark PR comment
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2.18.0
        with:
          channel: "stable"
          cache: true

      - name: Install dependencies
        run: flutter pub get
        working-directory: packages/disco

      - name: Run benchmarks (Debug Mode - Worst Case)
        working-directory: packages/disco
        run: |
          # GitHub's default `bash -e {0}` shell does NOT set pipefail, so
          # without this a failing `flutter test` piped into tee would be
          # silently swallowed (the step would report tee's exit status).
          set -o pipefail

          echo "# Provider Benchmark Results (Debug Mode)" > benchmark_results.md
          echo "" >> benchmark_results.md
          echo "**Date**: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> benchmark_results.md
          echo "**Mode**: Debug (worst-case with all validation)" >> benchmark_results.md
          echo "**Flutter**: $(flutter --version | head -n 1)" >> benchmark_results.md
          echo "**Dart**: $(dart --version | head -n 1)" >> benchmark_results.md
          echo "" >> benchmark_results.md
          echo "> Note: Benchmarks run in debug mode to measure worst-case performance." >> benchmark_results.md
          echo "> Release mode will be significantly faster (zero validation overhead)." >> benchmark_results.md
          echo "" >> benchmark_results.md
          echo "## Results" >> benchmark_results.md
          echo "" >> benchmark_results.md
          echo "| Benchmark | Time (ms) |" >> benchmark_results.md
          echo "|-----------|-----------|" >> benchmark_results.md

          # Run benchmark in debug mode (default for flutter test)
          flutter test benchmark/provider_benchmark.dart 2>&1 | tee benchmark_output.txt

          # Debug: Show what patterns we're looking for
          echo "=== Benchmark Output Preview ==="
          head -50 benchmark_output.txt
          echo "=== End Preview ==="

          # Each entry is "<grep pattern>|<human-readable table label>".
          declare -a patterns=(
            "Create 100 simple eager providers:|Create 100 simple eager providers"
            "Create 100 simple lazy providers:|Create 100 simple lazy providers"
            "Create 50 providers with dependencies:|Create 50 providers with dependencies"
            "Retrieve 100 lazy provider values:|Retrieve 100 lazy provider values"
            "Create 100 ArgProviders:|Create 100 ArgProviders"
            "Access 100 providers in nested scopes:|Access 100 providers in nested scopes"
            "Complex dependency chain with 30 providers:|Complex dependency chain (30 providers)"
            "Mixed lazy and eager providers|Mixed lazy and eager (100 total)"
            "ArgProviders with dependencies|ArgProviders with dependencies (50)"
            "Large scale - 500 providers:|Large scale (500 providers)"
            "Deep dependency chain|Deep dependency chain (100 levels)"
            "Wide dependency tree|Wide dependency tree (100 dependents)"
            "Multiple nested scopes|Multiple nested scopes (5 levels)"
          )

          # Extract each benchmark's timing into a table row; N/A when absent.
          for pattern_label in "${patterns[@]}"; do
            IFS='|' read -r pattern label <<< "$pattern_label"
            # `grep -oE` prints nothing when no "<digits>ms" token exists, so a
            # matching line without a timing yields an empty $time and falls
            # through to N/A. (The previous sed-based extraction passed
            # non-matching lines through verbatim, leaking raw log text into
            # the results table.)
            time=$(grep -m1 "$pattern" benchmark_output.txt | grep -oE '[0-9]+ms' | head -n 1 | sed 's/ms$//')
            if [ -n "$time" ]; then
              echo "| $label | $time |" >> benchmark_results.md
            else
              echo "| $label | N/A |" >> benchmark_results.md
            fi
          done

      - name: Upload benchmark results
        # Keep whatever was written even when the benchmark step fails —
        # the partial table is exactly what's needed to debug the failure.
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: packages/disco/benchmark_results.md

      - name: Comment PR with results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const results = fs.readFileSync('packages/disco/benchmark_results.md', 'utf8');
            // Update the existing bot comment (if any) instead of posting a
            // new one on every push to the PR.
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const botComment = comments.data.find((comment) =>
              comment.user.type === 'Bot' &&
              comment.body.includes('Provider Benchmark Results')
            );
            if (botComment) {
              // Update existing comment
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: botComment.id,
                body: results,
              });
            } else {
              // Create new comment
              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: results,
              });
            }

      - name: Display results
        run: cat packages/disco/benchmark_results.md