diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000..aa88323 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,58 @@ +{ + "permissions": { + "allow": [ + "Bash(find:*)", + "Bash(bun install:*)", + "Bash(bun run:*)", + "Bash(grep:*)", + "Bash(curl:*)", + "Bash(bun:*)", + "Bash(mkdir:*)", + "Bash(ls:*)", + "Bash(cp:*)", + "Bash(mv:*)", + "Bash(rm:*)", + "Bash(diff:*)", + "Bash(chmod:*)", + "Bash(tree:*)", + "Bash(git add:*)", + "Bash(git commit:*)", + "Bash(./dist/mcp-docsrs:*)", + "WebFetch(domain:github.com)", + "WebFetch(domain:bun.sh)", + "WebFetch(domain:raw.githubusercontent.com)", + "WebFetch(domain:modelcontextprotocol.io)", + "WebFetch(domain:docs.rs)", + "mcp__mcp-docsrs__lookup_crate_docs", + "mcp__mcp-docsrs__search_crates", + "mcp__context7__resolve-library-id", + "mcp__context7__get-library-docs", + "mcp__memory__create_entities", + "mcp__memory__create_relations", + "mcp__memory__add_observations", + "mcp__memory__delete_entities", + "mcp__memory__delete_observations", + "mcp__memory__delete_relations", + "mcp__memory__read_graph", + "mcp__memory__search_nodes", + "mcp__memory__open_nodes", + "mcp__sequential-thinking__sequentialthinking", + "mcp__mcp-interactive__request_user_input", + "mcp__mcp-interactive__message_complete_notification", + "mcp__mcp-interactive__start_intensive_chat", + "mcp__mcp-interactive__ask_intensive_chat", + "mcp__mcp-interactive__stop_intensive_chat", + "mcp__mcp-deepwiki__deepwiki_fetch", + "mcp__ide__getDiagnostics" + ] + }, + "enableAllProjectMcpServers": false, + "enabledMcpjsonServers": [ + "context7", + "mcp-deepwiki", + "mcp-interactive", + "mcp-docsrs", + "sequential-thinking", + "memory" + ] +} \ No newline at end of file diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..107c161 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,43 @@ +# Dependencies +node_modules/ +.bun/ + +# Build outputs (except the ones we need) +dist/* + +# Development files +.github/ +test/ +scripts/ +plans/ +src/ +*.ts +*.test.ts + +# Configuration files +.env +.env.local +.env.*.local +*.log +.DS_Store + +# IDE files +.vscode/ +.idea/ +*.swp +*.swo + +# Git +.git/ +.gitignore + +# Documentation +*.md +LICENSE + +# CI/CD +.github/ +bun.lock +package.json +tsconfig.json +biome.json diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4f7b21a..742ec57 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,66 +1,26 @@ version: 2 updates: - - package-ecosystem: "npm" - directory: "/" + - package-ecosystem: github-actions + directory: / schedule: - interval: "weekly" - day: "monday" - time: "03:00" - open-pull-requests-limit: 10 - pull-request-branch-name: - separator: "-" - commit-message: - prefix: "chore" - prefix-development: "chore" - include: "scope" - labels: - - "dependencies" - - "bun" - assignees: - - "vexxvakan" - reviewers: - - "vexxvakan" + interval: weekly groups: - development-dependencies: - patterns: - - "@types/*" - - "typescript" - - "@biomejs/biome" + actions-minor: update-types: - - "minor" - - "patch" - production-dependencies: - patterns: - - "*" - exclude-patterns: - - "@types/*" - - "typescript" - - "@biomejs/biome" - update-types: - - "patch" - # Auto-merge rules - allow: - - dependency-type: "development" - - dependency-type: "production" - ignore: - # Ignore major updates for critical dependencies - - dependency-name: "@modelcontextprotocol/sdk" - update-types: ["version-update:semver-major"] + - minor + - patch - # Enable version updates for 
GitHub Actions - - package-ecosystem: "github-actions" - directory: "/" + - package-ecosystem: npm + directory: / schedule: - interval: "monthly" - day: "monday" - time: "03:00" - commit-message: - prefix: "ci" - include: "scope" - labels: - - "ci" - - "github-actions" - assignees: - - "vexxvakan" - pull-request-branch-name: - separator: "-" + interval: weekly + groups: + npm-development: + dependency-type: development + update-types: + - minor + - patch + npm-production: + dependency-type: production + update-types: + - patch diff --git a/.github/workflows/README.md b/.github/workflows/README.md deleted file mode 100644 index 44da1f1..0000000 --- a/.github/workflows/README.md +++ /dev/null @@ -1,262 +0,0 @@ -# GitHub Workflows Documentation - -## Overview - -This repository uses optimized GitHub Actions workflows designed for open-source projects to minimize resource usage while maintaining quality. - -## Workflow Strategy - -### For Pull Requests - -- **Minimal checks only** (Linux, essential tests) -- **No resource-heavy scans** on every PR -- **Use `full-ci` label** to trigger complete pipeline - -### For Main Branch - -- **Full CI/CD pipeline** with all platforms -- **Security scans** on code changes -- **Automated releases** with changelogs - -## Structure - -```text -.github/ -├── workflows/ -│ ├── pr-ci.yml # Fast PR validation (Linux only) -│ ├── ci.yml # Full CI/CD pipeline (main + labeled PRs) -│ ├── release.yml # Release automation with changelogs -│ ├── security.yml # Security scanning (scheduled + main) -│ ├── codeql.yml # CodeQL analysis (weekly schedule) -│ ├── dependency-update.yml # Bun lockfile updates -│ ├── pr-automation.yml # PR labeling and stale checks -│ ├── test-workflow.yml # Reusable: Test suite across platforms -│ ├── build-workflow.yml # Reusable: Build executables -│ ├── code-quality-workflow.yml # Reusable: Linting, type checking -│ ├── integration-test-workflow.yml # Reusable: Integration testing -│ └── README.md # This file -├── codeql/ -│ └── codeql-config.yml # CodeQL configuration -├── dependabot.yml # Automated dependency updates -├── labeler.yml # PR auto-labeling rules -├── secret-scanning.yml # Secret scanning config -└── scripts/ - └── generate-changelog.sh # Changelog generation script -``` - -## Main Workflows - -### PR Quick Checks (`pr-ci.yml`) - -- **Triggers**: All PRs (except docs-only changes) -- **Purpose**: Fast validation on Linux only -- **Jobs**: Lint → Type Check → Test → Build -- **Runtime**: ~2-3 minutes - -### Full CI/CD Pipeline (`ci.yml`) - -- **Triggers**: Push to main, PRs with `full-ci` label -- **Purpose**: Complete validation across all platforms -- **Jobs**: Test → Build → Quality → Integration → Status Check -- **Note**: Add `full-ci` label to PR for full platform testing - -### Release Automation (`release.yml`) - -- **Trigger**: Manual dispatch with version selection -- **Purpose**: Automated releases with professional changelogs -- **Features**: - - Semantic version bumping (major/minor/patch/custom) - - Beautiful changelog with categorized commits - - First-time contributor recognition - - Multi-platform binary builds - - SHA256 checksums for all artifacts - - GitHub Release creation with download links - - Pre-release support - -### Security Scanning (`security.yml`) - -- **Triggers**: Push to main (code changes), weekly schedule -- **Purpose**: Comprehensive security analysis -- **Features**: - - Dependency vulnerability scanning - - License compliance checking - - Semgrep SAST analysis - - Secret scanning with 
Gitleaks - - TypeScript strict security checks - -### CodeQL Analysis (`codeql.yml`) - -- **Triggers**: - - PRs with `codeql` label - - Push to main (code changes only) - - Monthly schedule (backup) - - Manual dispatch -- **Purpose**: Deep semantic security analysis -- **Note**: Resource-intensive, use `codeql` label to run on PRs - -### Dependency Updates (`dependency-update.yml`) - -- **Triggers**: Dependabot PRs, monthly schedule -- **Purpose**: Bun-specific dependency management -- **Features**: - - Automatic Bun lockfile updates on Dependabot PRs - - Monthly outdated dependency reports - - Bun compatibility validation - -### PR Automation (`pr-automation.yml`) - -- **Triggers**: PR events, weekly stale check -- **Purpose**: Automate PR management -- **Features**: - - Auto-labeling by file type and size - - Weekly stale PR/issue checks - - First-time contributor welcome (only with label) -- **Note**: Minimal automation to avoid spam - -## Reusable Workflows - -### Test Suite (`test-workflow.yml`) - -- Runs tests on Ubuntu, macOS, and Windows -- Tests with Bun 1.2.14 and latest -- Uploads coverage to Codecov - -### Build (`build-workflow.yml`) - -- Builds executables for all 7 platforms: - - Linux x64/ARM64 (GLIBC and MUSL variants) - - macOS x64/ARM64 (Intel and Apple Silicon) - - Windows x64 -- All builds include bytecode compilation for faster startup -- Uploads artifacts for 7 days - -### Code Quality (`code-quality-workflow.yml`) - -- Runs Biome linter -- TypeScript type checking -- Security vulnerability scanning -- Bundle size analysis - -### Integration Tests (`integration-test-workflow.yml`) - -- Tests built executables on their native platforms -- Validates MCP server functionality -- Tests all 7 platform builds -- Runs on main branch and PRs with full-ci label - -## Key Differences: pr-ci.yml vs ci.yml - -### pr-ci.yml (Quick Checks) - -- **Purpose**: Fast feedback for every PR -- **Scope**: Minimal validation -- **Platform**: Ubuntu Linux only -- **Jobs**: Single job with lint, typecheck, test, build -- **When**: Every PR automatically - -### ci.yml (Full Pipeline) - -- **Purpose**: Comprehensive validation -- **Scope**: Full test matrix and integration tests -- **Platforms**: Ubuntu, macOS, Windows -- **Jobs**: Parallel test/build/quality jobs, then integration tests -- **Runtime**: ~15-20 minutes (all platforms) -- **When**: Push to main OR PRs with "full-ci" label - -## Adding New Workflows - -1. Create new reusable workflow with `workflow_call` trigger -2. Place in main `workflows/` directory with `-workflow` suffix -3. Reference from main workflows using `uses: ./.github/workflows/name-workflow.yml` -4. Pass secrets with `secrets: inherit` - -## Environment Variables - -- `BUN_VERSION`: Default Bun version (1.2.14) -- `NODE_VERSION`: Node.js version for compatibility - -## Secrets Required - -- `CODECOV_TOKEN`: For coverage reporting (optional) -- `GITHUB_TOKEN`: Automatically provided by GitHub Actions - -## Dependency Management - -### Dependabot Configuration - -Located in `.github/dependabot.yml`: - -- **Package Updates**: Weekly checks for Bun/npm dependencies -- **GitHub Actions**: Monthly updates for workflow actions -- **Grouped Updates**: Development and production dependencies -- **Auto-merge**: Patch updates for production, minor+patch for dev - -### Bun Compatibility - -Since Dependabot doesn't natively support Bun yet: - -1. Dependabot creates PRs based on `package.json` -2. Our `dependency-update.yml` workflow automatically updates `bun.lock` -3. 
All dependencies are validated for Bun compatibility - -## Resource Usage Optimization - -This workflow setup is optimized for open-source projects: - -- **PRs run minimal checks** (Linux only, ~2-3 min) -- **Full CI requires label** (`full-ci`) to prevent abuse -- **Security scans on schedule** not every PR -- **Stale checks weekly** not daily -- **Path filters** skip workflows for docs changes - -## Triggering Special Workflows on PRs - -### Full CI Pipeline - -1. Add the `full-ci` label to the PR -2. The complete test matrix will run across all platforms - -### CodeQL Security Analysis - -1. Add the `codeql` label to the PR -2. Deep semantic security analysis will run - -## Running a Release - -1. Go to Actions → Release Automation -2. Click "Run workflow" -3. Select release type: - - `patch`: Bug fixes (1.0.0 → 1.0.1) - - `minor`: New features (1.0.0 → 1.1.0) - - `major`: Breaking changes (1.0.0 → 2.0.0) - - `custom`: Specify exact version -4. Optionally mark as pre-release -5. Workflow will: - - Update version in package.json - - Generate comprehensive changelog - - Build all platform binaries - - Create GitHub Release with artifacts - -## Commit Message Format - -For best changelog generation, use conventional commits: - -- `feat:` New features -- `fix:` Bug fixes -- `docs:` Documentation changes -- `perf:` Performance improvements -- `refactor:` Code refactoring -- `test:` Test additions/changes -- `build:` Build system changes -- `chore:` Maintenance tasks - -Add `!` for breaking changes: `feat!: new API` - -## Workflow Issues Fixed - -Recent improvements to the workflow structure: - -1. **Fixed workflow references**: Moved all reusable workflows to top-level directory (GitHub Actions requirement) -2. **Fixed label detection**: Changed from `github.event.label.name` to `github.event.pull_request.labels.*.name` for proper PR label checking -3. **Fixed job dependencies**: Added `integration-test` to `status-check` dependencies -4. 
**Improved status checks**: Handle skipped jobs properly in CI status validation diff --git a/.github/workflows/build-workflow.yml b/.github/workflows/build-workflow.yml index 452cad7..83e67b7 100644 --- a/.github/workflows/build-workflow.yml +++ b/.github/workflows/build-workflow.yml @@ -4,15 +4,13 @@ on: workflow_call: env: - BUN_VERSION: '1.2.14' - + BUN_VERSION: 'latest' jobs: build: name: Build ${{ matrix.target }} runs-on: ${{ matrix.os }} - timeout-minutes: 60 # Extended timeout for ARM64 runner availability - # Limit ARM64 concurrency to help with runner availability + timeout-minutes: 15 concurrency: group: arm64-${{ matrix.target }}-${{ github.ref }} cancel-in-progress: false @@ -21,33 +19,25 @@ jobs: matrix: include: # Linux x64 GLIBC builds - - os: ubuntu-latest - target: linux-x64 - build-cmd: bun run build:linux-x64 + # - os: ${{ vars.RUNNER }} + # target: linux-x64 + # build-cmd: bun run build:linux-x64 # Linux ARM64 GLIBC builds (native ARM64 runner) - - os: ubuntu-24.04-arm + - os: ${{ vars.RUNNER }} target: linux-arm64 build-cmd: bun run build:linux-arm64 - # Linux x64 MUSL builds (for Alpine, containers, static linking) - - os: ubuntu-latest - target: linux-x64-musl - build-cmd: bun run build:linux-x64-musl - # Linux ARM64 MUSL builds (native ARM64 runner) - - os: ubuntu-24.04-arm - target: linux-arm64-musl - build-cmd: bun run build:linux-arm64-musl - # macOS x64 builds (Intel-based runner) - - os: macos-13 - target: darwin-x64 - build-cmd: bun run build:darwin-x64 - # macOS ARM64 builds (Apple Silicon runner - latest) - - os: macos-latest - target: darwin-arm64 - build-cmd: bun run build:darwin-arm64 - # Windows build - - os: windows-latest - target: windows-x64 - build-cmd: bun run build:windows-x64 + # # macOS x64 builds (Intel-based runner) + # - os: macos-13 + # target: darwin-x64 + # build-cmd: bun run build:darwin-x64 + # # macOS ARM64 builds (Apple Silicon runner - latest) + # - os: macos-latest + # target: darwin-arm64 + # build-cmd: bun run build:darwin-arm64 + # # Windows build + # - os: windows-latest + # target: windows-x64 + # build-cmd: bun run build:windows-x64 steps: - name: Show runner info @@ -62,16 +52,16 @@ jobs: shell: bash - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: ${{ env.BUN_VERSION }} - no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues + no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues - name: Cache dependencies - uses: actions/cache@v4 + uses: ubicloud/cache@v4 with: path: | ~/.bun/install/cache @@ -87,7 +77,6 @@ jobs: - name: Build executable run: ${{ matrix.build-cmd }} - - name: Test executable run: | # First check if the binary exists @@ -109,18 +98,7 @@ jobs: else # Test binaries on their native architecture if [[ "${{ matrix.os }}" == "ubuntu-"* ]] && [[ "${{ matrix.target }}" == *"linux"* ]]; then - # Test MUSL binaries in Alpine container - if [[ "${{ matrix.target }}" == *"musl"* ]]; then - # Make binary executable before mounting - chmod +x ./dist/mcp-docsrs-${{ matrix.target }} - # IMPORTANT: Bun's MUSL builds are NOT fully static - they dynamically link libstdc++ - # This is a known limitation: https://github.com/oven-sh/bun/issues/16056 - # Test using oven/bun:alpine which has all necessary libraries - docker run --rm -v $PWD/dist:/workspace:ro -w /workspace oven/bun:alpine /workspace/mcp-docsrs-${{ matrix.target }} --version - else - # Test GLIBC binaries directly 
./dist/mcp-docsrs-${{ matrix.target }} --version - fi elif [[ "${{ matrix.os }}" == "macos-"* ]] && [[ "${{ matrix.target }}" == *"darwin"* ]]; then # Test on native architecture (no Rosetta) ./dist/mcp-docsrs-${{ matrix.target }} --version @@ -137,7 +115,7 @@ jobs: if [[ "${{ matrix.os }}" == "ubuntu-24.04-arm" ]] && [[ "${{ matrix.target }}" == *"arm64"* ]]; then echo "Running tests on native ARM64..." bun test - elif [[ "${{ matrix.os }}" == "ubuntu-latest" ]] && [[ "${{ matrix.target }}" == "linux-x64" ]]; then + elif [[ "${{ matrix.os }}" == "${{ vars.RUNNER }}" ]] && [[ "${{ matrix.target }}" == "linux-x64" ]]; then echo "Running tests on native x64..." bun test elif [[ "${{ matrix.os }}" == "macos-"* ]] && [[ "${{ matrix.target }}" == "darwin-"* ]]; then @@ -171,4 +149,4 @@ jobs: name: mcp-docsrs-${{ matrix.target }} path: | dist/mcp-docsrs-${{ matrix.target }}* - retention-days: 30 \ No newline at end of file + retention-days: 3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a249ce0..01b97ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,11 +1,8 @@ name: CI/CD Pipeline - on: push: - branches: [ main ] + branches: [main] pull_request: - types: [ labeled ] - # Only run when "full-ci" label is added workflow_dispatch: inputs: debug_enabled: @@ -13,14 +10,9 @@ on: required: false default: false type: boolean - jobs: test: name: Test Suite - if: | - github.event_name == 'push' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'full-ci')) uses: ./.github/workflows/test-workflow.yml secrets: inherit with: @@ -28,72 +20,11 @@ jobs: build: name: Build - needs: test uses: ./.github/workflows/build-workflow.yml secrets: inherit - code-quality: - name: Quality - needs: test - uses: ./.github/workflows/code-quality-workflow.yml - secrets: inherit - integration-test: name: Integration needs: build - if: | - github.event_name == 'push' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'full-ci')) uses: ./.github/workflows/integration-test-workflow.yml secrets: inherit - - status-check: - name: CI Status Check - runs-on: ubuntu-latest - needs: [test, build, code-quality, integration-test] - if: always() - - steps: - - name: Check CI Status - run: | - # Check all job results - FAILED=false - - if [ "${{ needs.test.result }}" != "success" ] && [ "${{ needs.test.result }}" != "skipped" ]; then - echo "❌ Test failed: ${{ needs.test.result }}" - FAILED=true - fi - - if [ "${{ needs.build.result }}" != "success" ] && [ "${{ needs.build.result }}" != "skipped" ]; then - echo "❌ Build failed: ${{ needs.build.result }}" - FAILED=true - fi - - if [ "${{ needs.code-quality.result }}" != "success" ] && [ "${{ needs.code-quality.result }}" != "skipped" ]; then - echo "❌ Code Quality failed: ${{ needs.code-quality.result }}" - FAILED=true - fi - - if [ "${{ needs.integration-test.result }}" != "success" ] && [ "${{ needs.integration-test.result }}" != "skipped" ]; then - echo "❌ Integration Test failed: ${{ needs.integration-test.result }}" - FAILED=true - fi - - if [ "$FAILED" = "true" ]; then - echo "❌ CI pipeline failed" - exit 1 - else - echo "✅ CI pipeline passed successfully!" 
- fi - - - name: Create status summary - if: always() - run: | - echo "## CI Pipeline Summary" >> $GITHUB_STEP_SUMMARY - echo "| Job | Status |" >> $GITHUB_STEP_SUMMARY - echo "|-----|--------|" >> $GITHUB_STEP_SUMMARY - echo "| Test Suite | ${{ needs.test.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| Build | ${{ needs.build.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| Code Quality | ${{ needs.code-quality.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| Integration Test | ${{ needs.integration-test.result }} |" >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/claude-code-review.yml b/.github/workflows/claude-code-review.yml deleted file mode 100644 index ecd27d0..0000000 --- a/.github/workflows/claude-code-review.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: Claude Code Review - -on: - pull_request: - types: [opened, synchronize] - # Optional: Only run on specific file changes - # paths: - # - "src/**/*.ts" - # - "src/**/*.tsx" - # - "src/**/*.js" - # - "src/**/*.jsx" - -jobs: - claude-review: - # Optional: Filter by PR author - # if: | - # github.event.pull_request.user.login == 'external-contributor' || - # github.event.pull_request.user.login == 'new-developer' || - # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' - - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: read - issues: read - id-token: write - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - - name: Run Claude Code Review - id: claude-review - uses: anthropics/claude-code-action@beta - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - - # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4) - # model: "claude-opus-4-20250514" - - # Direct prompt for automated review (no @claude mention needed) - direct_prompt: | - Please review this pull request and provide feedback on: - - Code quality and best practices - - Potential bugs or issues - - Performance considerations - - Security concerns - - Test coverage - - Be constructive and helpful in your feedback. - - # Optional: Customize review based on file types - # direct_prompt: | - # Review this PR focusing on: - # - For TypeScript files: Type safety and proper interface usage - # - For API endpoints: Security, input validation, and error handling - # - For React components: Performance, accessibility, and best practices - # - For tests: Coverage, edge cases, and test quality - - # Optional: Different prompts for different authors - # direct_prompt: | - # ${{ github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' && - # 'Welcome! Please review this PR from a first-time contributor. Be encouraging and provide detailed explanations for any suggestions.' || - # 'Please provide a thorough code review focusing on our coding standards and best practices.' 
}} - - # Optional: Add specific tools for running tests or linting - # allowed_tools: "Bash(npm run test),Bash(npm run lint),Bash(npm run typecheck)" - - # Optional: Skip review for certain conditions - # if: | - # !contains(github.event.pull_request.title, '[skip-review]') && - # !contains(github.event.pull_request.title, '[WIP]') - diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml deleted file mode 100644 index 58d0fa2..0000000 --- a/.github/workflows/claude.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Claude Code - -on: - issue_comment: - types: [created] - pull_request_review_comment: - types: [created] - issues: - types: [opened, assigned] - pull_request_review: - types: [submitted] - -jobs: - claude: - if: | - (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || - (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || - (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || - (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: read - issues: read - id-token: write - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - - name: Run Claude Code - id: claude - uses: anthropics/claude-code-action@beta - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - - # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4) - # model: "claude-opus-4-20250514" - - # Optional: Customize the trigger phrase (default: @claude) - # trigger_phrase: "/claude" - - # Optional: Trigger when specific user is assigned to an issue - # assignee_trigger: "claude-bot" - - # Optional: Allow Claude to run specific commands - # allowed_tools: "Bash(npm install),Bash(npm run build),Bash(npm run test:*),Bash(npm run lint:*)" - - # Optional: Add custom instructions for Claude to customize its behavior for your project - # custom_instructions: | - # Follow our coding standards - # Ensure all new code has tests - # Use TypeScript for new files - - # Optional: Custom environment variables for Claude - # claude_env: | - # NODE_ENV: test - diff --git a/.github/workflows/code-quality-workflow.yml b/.github/workflows/code-quality-workflow.yml deleted file mode 100644 index 240648a..0000000 --- a/.github/workflows/code-quality-workflow.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Code Quality - -on: - workflow_call: - -env: - BUN_VERSION: '1.2.14' - -jobs: - quality: - name: Code Quality Checks - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: ${{ env.BUN_VERSION }} - - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - ~/.bun/install/cache - node_modules - key: ${{ runner.os }}-bun-quality-${{ hashFiles('**/bun.lock') }} - restore-keys: | - ${{ runner.os }}-bun-quality- - ${{ runner.os }}-bun- - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Check formatting - run: bun run lint - - - name: Run type check with strict mode - run: bun run typecheck - - - name: Check for security vulnerabilities - run: | - bunx audit-ci --moderate - continue-on-error: true - - - name: Bundle size analysis - run: | - echo "Analyzing bundle size..." 
- bun run build - ls -lah dist/ - - # Calculate and report sizes - echo "## Bundle Size Report" >> $GITHUB_STEP_SUMMARY - echo "| Platform | Size |" >> $GITHUB_STEP_SUMMARY - echo "|----------|------|" >> $GITHUB_STEP_SUMMARY - - for file in dist/mcp-docsrs-*; do - if [ -f "$file" ]; then - SIZE=$(stat -c%s "$file" 2>/dev/null || stat -f%z "$file") - SIZE_MB=$((SIZE / 1024 / 1024)) - FILENAME=$(basename "$file") - echo "| $FILENAME | ${SIZE_MB} MB |" >> $GITHUB_STEP_SUMMARY - fi - done \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index fea6a0d..0000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: CodeQL Analysis - -on: - pull_request: - types: [ labeled ] - # Only run when "codeql" label is added - push: - branches: [ main ] - paths: - - 'src/**/*.ts' - - 'src/**/*.js' - - '.github/workflows/codeql.yml' - - '.github/codeql/**' - schedule: - # Run monthly as a backup - - cron: '0 2 1 * *' - workflow_dispatch: - -permissions: - actions: read - contents: read - security-events: write - -jobs: - analyze: - name: Analyze (${{ matrix.language }}) - runs-on: ubuntu-latest - timeout-minutes: 360 - if: | - github.event_name == 'push' || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' || - (github.event_name == 'pull_request' && contains(github.event.label.name, 'codeql')) - - strategy: - fail-fast: false - matrix: - language: [ 'javascript-typescript' ] - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - config-file: ./.github/codeql/codeql-config.yml - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Install dependencies - run: | - bun install --frozen-lockfile - - - name: Build project - run: | - # Build to ensure all TypeScript is transpiled for analysis - bun run build || true - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 - with: - category: "/language:${{ matrix.language }}" - - - name: Generate Analysis Summary - if: always() - run: | - echo "## 🔍 CodeQL Analysis Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Language**: ${{ matrix.language }}" >> $GITHUB_STEP_SUMMARY - echo "**Status**: Analysis completed" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "Results will be available in the Security tab once processed." 
>> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/dependency-update.yml b/.github/workflows/dependency-update.yml deleted file mode 100644 index e1afd70..0000000 --- a/.github/workflows/dependency-update.yml +++ /dev/null @@ -1,197 +0,0 @@ -name: Dependency Updates - -on: - pull_request: - paths: - - 'package.json' - types: [opened, synchronize] - schedule: - # Run monthly dependency checks - - cron: '0 4 1 * *' - workflow_dispatch: - -jobs: - update-bun-lockfile: - name: Update Bun Lockfile - runs-on: ubuntu-latest - if: | - github.event_name == 'pull_request' && - contains(github.event.pull_request.labels.*.name, 'dependencies') && - github.actor == 'dependabot[bot]' - - permissions: - contents: write - pull-requests: write - - steps: - - name: Checkout PR - uses: actions/checkout@v4 - with: - token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ github.event.pull_request.head.ref }} - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Update Bun lockfile - run: | - # Remove any npm/yarn lockfiles if they exist - rm -f package-lock.json yarn.lock pnpm-lock.yaml - - # Update bun lockfile - bun install - - # Check if lockfile was modified - if git diff --quiet bun.lock; then - echo "No lockfile changes needed" - else - echo "Lockfile updated" - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - git add bun.lock - git commit -m "chore: update bun.lock for dependency updates" - git push - fi - - check-outdated: - name: Check Outdated Dependencies - runs-on: ubuntu-latest - if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Check for outdated dependencies - run: | - echo "## 📦 Dependency Status Report" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "Generated on: $(date)" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Install dependencies first - bun install - - # Check for outdated packages - echo "### Checking for outdated dependencies..." >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - # Bun doesn't have a built-in outdated command, so we'll use npm-check-updates - if bunx npm-check-updates --format group >> $GITHUB_STEP_SUMMARY 2>&1; then - echo "All dependencies are up to date!" >> $GITHUB_STEP_SUMMARY - fi - - echo '```' >> $GITHUB_STEP_SUMMARY - - - name: Create issue if updates available - uses: actions/github-script@v7 - with: - script: | - const { data: issues } = await github.rest.issues.listForRepo({ - owner: context.repo.owner, - repo: context.repo.repo, - labels: 'dependencies,automated', - state: 'open' - }); - - const title = '📦 Weekly Dependency Update Report'; - const existingIssue = issues.find(issue => issue.title === title); - - const body = `## Dependency Update Report - - This is an automated report of available dependency updates. - - Please review the [workflow summary](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}) for details. - - ### Next Steps - 1. Review available updates - 2. Test compatibility with Bun - 3. 
Update dependencies as needed - - --- - *This issue was automatically created by the dependency update workflow.*`; - - if (existingIssue) { - await github.rest.issues.update({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: existingIssue.number, - body: body - }); - } else { - await github.rest.issues.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title: title, - body: body, - labels: ['dependencies', 'automated'] - }); - } - - validate-bun-compatibility: - name: Validate Bun Compatibility - runs-on: ubuntu-latest - if: github.event_name == 'pull_request' - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Clean install - run: | - rm -rf node_modules - rm -f bun.lock - bun install - - - name: Verify Bun-specific features - run: | - echo "## 🐰 Bun Compatibility Check" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Check for Node.js specific imports that might not work with Bun - echo "### Checking for Node.js specific imports..." >> $GITHUB_STEP_SUMMARY - - PROBLEMATIC_IMPORTS=( - "node:cluster" - "node:async_hooks" - "node:trace_events" - "node:v8" - "node:vm" - "node:worker_threads" - ) - - FOUND_ISSUES=false - for import in "${PROBLEMATIC_IMPORTS[@]}"; do - if grep -r "$import" --include="*.ts" --include="*.js" --exclude-dir=node_modules . > /dev/null 2>&1; then - echo "⚠️ Found usage of $import which may have limited support in Bun" >> $GITHUB_STEP_SUMMARY - FOUND_ISSUES=true - fi - done - - if [ "$FOUND_ISSUES" = false ]; then - echo "✅ No problematic Node.js imports found!" >> $GITHUB_STEP_SUMMARY - fi - - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Bun Version Info" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - bun --version >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - - name: Test with Bun - run: | - bun test - bun run typecheck - bun run lint \ No newline at end of file diff --git a/.github/workflows/integration-test-workflow.yml b/.github/workflows/integration-test-workflow.yml index 0b9fb87..77c8ddd 100644 --- a/.github/workflows/integration-test-workflow.yml +++ b/.github/workflows/integration-test-workflow.yml @@ -2,9 +2,8 @@ name: Integration Tests on: workflow_call: - env: - BUN_VERSION: '1.2.14' + BUN_VERSION: 'latest' jobs: integration: @@ -14,40 +13,19 @@ jobs: fail-fast: false matrix: include: - # Test native binaries on their respective platforms - - os: ubuntu-latest - target: linux-x64 - artifact: mcp-docsrs-linux-x64 - - os: ubuntu-latest - target: linux-x64-musl - artifact: mcp-docsrs-linux-x64-musl - needs-docker: true - - os: ubuntu-24.04-arm + - os: ${{ vars.RUNNER }} target: linux-arm64 artifact: mcp-docsrs-linux-arm64 - - os: ubuntu-24.04-arm - target: linux-arm64-musl - artifact: mcp-docsrs-linux-arm64-musl - needs-docker: true - - os: macos-13 # Intel Mac for x64 binary - target: darwin-x64 - artifact: mcp-docsrs-darwin-x64 - - os: macos-latest # Apple Silicon for ARM64 binary - target: darwin-arm64 - artifact: mcp-docsrs-darwin-arm64 - - os: windows-latest - target: windows-x64 - artifact: mcp-docsrs-windows-x64 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Bun uses: oven-sh/setup-bun@v2 with: bun-version: ${{ env.BUN_VERSION }} - no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues + no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues - name: 
Download artifact uses: actions/download-artifact@v4 @@ -59,19 +37,7 @@ jobs: if: matrix.os != 'windows-latest' run: chmod +x dist/${{ matrix.artifact }} - - name: Build MUSL test Docker image - if: matrix.needs-docker == true - run: docker build -t mcp-docsrs-musl-test -f test/integration/Dockerfile.musl-test . - - - name: Run integration tests (MUSL) - if: matrix.needs-docker == true - run: | - docker run --rm -v $PWD:/workspace mcp-docsrs-musl-test -c " - chmod +x /workspace/test/integration/test-musl.sh && - /workspace/test/integration/test-musl.sh /workspace/dist/${{ matrix.artifact }} ${{ matrix.target }} - " - - - name: Run integration tests (Native) + - name: Run integration tests if: matrix.needs-docker != true shell: bash run: | @@ -84,11 +50,8 @@ jobs: # Run integration test suite FLAGS="" - if [[ "${{ matrix.target }}" == *"-musl" ]]; then - FLAGS="--musl" - fi if [ "${{ matrix.os }}" == "windows-latest" ]; then FLAGS="$FLAGS --windows" fi - + bun test/integration/test-binary.ts "$EXECUTABLE" "${{ matrix.target }}" $FLAGS diff --git a/.github/workflows/pr-automation.yml b/.github/workflows/pr-automation.yml deleted file mode 100644 index a272f54..0000000 --- a/.github/workflows/pr-automation.yml +++ /dev/null @@ -1,128 +0,0 @@ -name: PR Automation - -on: - pull_request: - types: [opened, synchronize] - issues: - types: [opened] - schedule: - # Run weekly for stale checks - - cron: '0 0 * * 0' - -permissions: - contents: read - pull-requests: write - issues: write - -jobs: - auto-label: - name: Auto Label - runs-on: ubuntu-latest - if: github.event_name == 'pull_request' - - steps: - - name: Label based on files changed - uses: actions/labeler@v5 - with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - sync-labels: true - - - name: Label based on PR size - uses: actions/github-script@v7 - with: - script: | - const { data: pr } = await github.rest.pulls.get({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number - }); - - const additions = pr.additions; - const deletions = pr.deletions; - const total = additions + deletions; - - let sizeLabel = ''; - if (total < 10) { - sizeLabel = 'size/XS'; - } else if (total < 50) { - sizeLabel = 'size/S'; - } else if (total < 250) { - sizeLabel = 'size/M'; - } else if (total < 500) { - sizeLabel = 'size/L'; - } else { - sizeLabel = 'size/XL'; - } - - // Remove other size labels - const { data: labels } = await github.rest.issues.listLabelsOnIssue({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number - }); - - const sizeLabels = labels - .filter(label => label.name.startsWith('size/')) - .filter(label => label.name !== sizeLabel); - - for (const label of sizeLabels) { - await github.rest.issues.removeLabel({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - name: label.name - }).catch(() => {}); // Ignore if label doesn't exist - } - - // Add the appropriate size label - await github.rest.issues.addLabels({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - labels: [sizeLabel] - }).catch(() => {}); // Ignore if label already exists - - # Only welcome if they have 'first-time-contributor' label - # This prevents spam on every PR - welcome-first-time: - name: Welcome First Time Contributors - runs-on: ubuntu-latest - if: | - github.event_name == 'pull_request' && - github.event.action == 'opened' && - contains(github.event.pull_request.labels.*.name, 'first-time-contributor') - - 
steps: - - name: Welcome message - uses: actions/github-script@v7 - with: - script: | - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - body: `Welcome @${context.payload.pull_request.user.login}! 🎉 - - Thank you for your first contribution! A maintainer will review your PR soon.` - }); - - stale-check: - name: Mark Stale Items - runs-on: ubuntu-latest - if: github.event_name == 'schedule' - - steps: - - name: Mark stale issues and PRs - uses: actions/stale@v9 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been inactive for 60 days.' - stale-pr-message: 'This PR has been inactive for 30 days.' - stale-issue-label: 'stale' - stale-pr-label: 'stale' - days-before-issue-stale: 60 - days-before-pr-stale: 30 - days-before-close: 14 - exempt-pr-labels: 'pinned,security,work-in-progress,full-ci,codeql' - exempt-issue-labels: 'pinned,security,bug' - operations-per-run: 30 \ No newline at end of file diff --git a/.github/workflows/pr-ci.yml b/.github/workflows/pr-ci.yml deleted file mode 100644 index f773732..0000000 --- a/.github/workflows/pr-ci.yml +++ /dev/null @@ -1,70 +0,0 @@ -name: PR Checks - -on: - pull_request: - types: [opened, synchronize, reopened] - paths-ignore: - - '**.md' - - 'docs/**' - - '.github/*.md' - - 'LICENSE' - - '.gitignore' - - '.vscode/**' - - '.claude/**' - -permissions: - contents: read - -jobs: - quick-check: - name: Quick PR Validation - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Cache dependencies - uses: actions/cache@v4 - with: - path: | - ~/.bun/install/cache - node_modules - key: ${{ runner.os }}-bun-pr-${{ hashFiles('**/bun.lock') }} - restore-keys: | - ${{ runner.os }}-bun-pr- - ${{ runner.os }}-bun- - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Run linter - id: lint - run: bun run lint - - - name: Run type check - id: typecheck - run: bun run typecheck - - - name: Run tests - id: tests - run: bun test - - - name: Test build - id: build - run: bun run build:linux-x64 - - - name: Summary - if: always() - run: | - echo "## 📋 PR Check Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "- **Linting**: ${{ steps.lint.outcome }}" >> $GITHUB_STEP_SUMMARY - echo "- **Type Check**: ${{ steps.typecheck.outcome }}" >> $GITHUB_STEP_SUMMARY - echo "- **Tests**: ${{ steps.tests.outcome }}" >> $GITHUB_STEP_SUMMARY - echo "- **Build**: ${{ steps.build.outcome }}" >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e3e578e..19d98bc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -31,14 +31,14 @@ permissions: jobs: prepare-release: name: Prepare Release - runs-on: ubuntu-latest + runs-on: ${{ vars.RUNNER }} outputs: version: ${{ steps.version.outputs.version }} changelog: ${{ steps.changelog.outputs.changelog }} steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -261,11 +261,11 @@ jobs: build-docker: name: Build Docker Images needs: [prepare-release, build-release] - runs-on: ubuntu-latest + runs-on: ${{ vars.RUNNER }} env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} - + strategy: matrix: include: @@ -276,7 +276,7 @@ jobs: steps: - name: 
Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -294,11 +294,11 @@
       - name: Download binary artifact
         uses: actions/download-artifact@v4
         with:
-          name: mcp-docsrs-linux-${{ matrix.arch }}-musl
+          name: mcp-docsrs-linux-${{ matrix.arch }}
           path: dist/

       - name: Make binary executable
-        run: chmod +x dist/mcp-docsrs-linux-${{ matrix.arch }}-musl
+        run: chmod +x dist/mcp-docsrs-linux-${{ matrix.arch }}

       - name: Extract metadata
         id: meta
@@ -319,14 +319,14 @@
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           build-args: |
-            BINARY_NAME=mcp-docsrs-linux-${{ matrix.arch }}-musl
+            BINARY_NAME=mcp-docsrs-linux-${{ matrix.arch }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

   create-docker-manifest:
     name: Create Docker Multi-Arch Manifest
     needs: [prepare-release, build-docker]
-    runs-on: ubuntu-latest
+    runs-on: ${{ vars.RUNNER }}
     env:
       REGISTRY: ghcr.io
       IMAGE_NAME: ${{ github.repository }}
@@ -342,28 +342,28 @@
       - name: Create and push manifest
         run: |
           VERSION="${{ needs.prepare-release.outputs.version }}"
-          
+
           # Create version-specific manifest
           docker manifest create ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${VERSION} \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:x64 \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:arm64
-          
+
           # Create latest manifest
           docker manifest create ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:x64 \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:arm64
-          
+
           # Annotate manifests with architecture info
           docker manifest annotate ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${VERSION} \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:x64 --arch amd64
           docker manifest annotate ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${VERSION} \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:arm64 --arch arm64
-          
+
           docker manifest annotate ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:x64 --arch amd64
           docker manifest annotate ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest \
             ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:arm64 --arch arm64
-          
+
           # Push manifests
           docker manifest push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${VERSION}
           docker manifest push ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
@@ -371,11 +371,11 @@
   create-release:
     name: Create GitHub Release
     needs: [prepare-release, build-release, create-docker-manifest]
-    runs-on: ubuntu-latest
+    runs-on: ${{ vars.RUNNER }}

     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
           ref: main
           fetch-depth: 0
@@ -423,8 +423,6 @@
           |----------|--------------|------|----------|
           | 🐧 Linux | x64 | GLIBC | [mcp-docsrs-linux-x64](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-x64) |
           | 🐧 Linux | ARM64 | GLIBC | [mcp-docsrs-linux-arm64](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-arm64) |
-          | 🐧 Linux | x64 | MUSL | [mcp-docsrs-linux-x64-musl](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-x64-musl) |
-          | 🐧 Linux | ARM64 | MUSL | [mcp-docsrs-linux-arm64-musl](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-arm64-musl) |
           | 🍎 macOS | x64 | Intel | [mcp-docsrs-darwin-x64](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-darwin-x64) |
           | 🍎 macOS | ARM64 | Apple Silicon | 
[mcp-docsrs-darwin-arm64](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-darwin-arm64) | | 🪟 Windows | x64 | - | [mcp-docsrs-windows-x64.exe](https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-windows-x64.exe) | @@ -436,10 +434,10 @@ jobs: ```bash # Pull the latest image (multi-arch: x64 and ARM64) docker pull ghcr.io/${{ github.repository }}:latest - + # Or use a specific version docker pull ghcr.io/${{ github.repository }}:${VERSION} - + # Run the server docker run --rm -i ghcr.io/${{ github.repository }}:latest ``` @@ -458,18 +456,7 @@ jobs: curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-x64 -o mcp-docsrs # ARM64 (AWS Graviton, Raspberry Pi 4+) curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-arm64 -o mcp-docsrs - - chmod +x mcp-docsrs - ./mcp-docsrs --version - ``` - #### Linux (MUSL - Alpine, Docker, Static) - ```bash - # x64/AMD64 - curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-x64-musl -o mcp-docsrs - # ARM64 - curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-linux-arm64-musl -o mcp-docsrs - chmod +x mcp-docsrs ./mcp-docsrs --version ``` @@ -480,7 +467,7 @@ jobs: curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-darwin-x64 -o mcp-docsrs # Apple Silicon (M1/M2/M3) curl -L https://github.com/${{ github.repository }}/releases/download/v${VERSION}/mcp-docsrs-darwin-arm64 -o mcp-docsrs - + chmod +x mcp-docsrs ./mcp-docsrs --version ``` @@ -528,8 +515,6 @@ jobs: files: | artifacts/mcp-docsrs-linux-x64/mcp-docsrs-linux-x64 artifacts/mcp-docsrs-linux-arm64/mcp-docsrs-linux-arm64 - artifacts/mcp-docsrs-linux-x64-musl/mcp-docsrs-linux-x64-musl - artifacts/mcp-docsrs-linux-arm64-musl/mcp-docsrs-linux-arm64-musl artifacts/mcp-docsrs-darwin-x64/mcp-docsrs-darwin-x64 artifacts/mcp-docsrs-darwin-arm64/mcp-docsrs-darwin-arm64 artifacts/mcp-docsrs-windows-x64/mcp-docsrs-windows-x64.exe @@ -549,4 +534,4 @@ jobs: echo "### Next Steps" >> $GITHUB_STEP_SUMMARY echo "1. Review the release notes" >> $GITHUB_STEP_SUMMARY echo "2. Test the released binaries and Docker images" >> $GITHUB_STEP_SUMMARY - echo "3. Announce the release" >> $GITHUB_STEP_SUMMARY \ No newline at end of file + echo "3. 
Announce the release" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml deleted file mode 100644 index cf762f5..0000000 --- a/.github/workflows/security.yml +++ /dev/null @@ -1,311 +0,0 @@ -name: Security Scanning - -on: - push: - branches: [ main ] - paths: - - 'src/**' - - 'package.json' - - 'bun.lock' - - '.github/workflows/security.yml' - schedule: - # Run security scan weekly on Monday at 2 AM UTC - - cron: '0 2 * * 1' - workflow_dispatch: - -permissions: - contents: read - security-events: write - -jobs: - dependency-audit: - name: Dependency Audit - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Run Bun audit - run: | - echo "## 🔒 Security Audit Report" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Note: Bun doesn't have a built-in audit command yet, so we use alternative tools - # Check with npm audit for known vulnerabilities (works with package.json) - if bunx better-npm-audit audit --level moderate > audit-results.txt 2>&1; then - echo "✅ No security vulnerabilities found!" >> $GITHUB_STEP_SUMMARY - else - echo "⚠️ Security vulnerabilities detected:" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - cat audit-results.txt >> $GITHUB_STEP_SUMMARY || true - echo '```' >> $GITHUB_STEP_SUMMARY - fi - continue-on-error: true - - - name: Check for known vulnerabilities - uses: aquasecurity/trivy-action@master - with: - scan-type: 'fs' - scan-ref: '.' - format: 'sarif' - output: 'trivy-results.sarif' - severity: 'CRITICAL,HIGH,MEDIUM' - ignore-unfixed: true - - - name: Upload Trivy results to GitHub Security - uses: github/codeql-action/upload-sarif@v3 - if: always() - with: - sarif_file: 'trivy-results.sarif' - - license-check: - name: License Compliance - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Check licenses - run: | - echo "## 📋 License Compliance Report" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Install license checker - bunx license-checker --summary --excludePrivatePackages > license-summary.txt - - # Display summary - echo "### License Summary" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - cat license-summary.txt >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - # Check for problematic licenses - PROBLEMATIC_LICENSES="GPL|AGPL|LGPL|SSPL|EUPL" - if bunx license-checker --excludePrivatePackages --onlyAllow "MIT;Apache-2.0;BSD-2-Clause;BSD-3-Clause;ISC;CC0-1.0;CC-BY-3.0;CC-BY-4.0;Unlicense;WTFPL" > /dev/null 2>&1; then - echo "" >> $GITHUB_STEP_SUMMARY - echo "✅ All licenses are compatible!" 
>> $GITHUB_STEP_SUMMARY - else - echo "" >> $GITHUB_STEP_SUMMARY - echo "⚠️ Some licenses may need review" >> $GITHUB_STEP_SUMMARY - fi - continue-on-error: true - - semgrep: - name: Semgrep SAST - runs-on: ubuntu-latest - container: - image: semgrep/semgrep - if: (github.actor != 'dependabot[bot]') - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Run Semgrep CI - run: | - # Run Semgrep with local rules (no app token required) - semgrep ci \ - --config=auto \ - --config=p/security-audit \ - --config=p/typescript \ - --config=p/javascript \ - --config=p/nodejs \ - --config=p/owasp-top-ten \ - --json \ - --output=semgrep-results.json || true - - # Generate summary - echo "## 🔍 SAST Scan Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ -f semgrep-results.json ]; then - FINDINGS=$(jq '.results | length' semgrep-results.json) - if [ "$FINDINGS" -eq 0 ]; then - echo "✅ No security issues found by Semgrep!" >> $GITHUB_STEP_SUMMARY - else - echo "⚠️ Found $FINDINGS potential security issues" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Show top 5 findings - echo "### Top Findings:" >> $GITHUB_STEP_SUMMARY - jq -r '.results[:5] | .[] | "- **\(.check_id)**: \(.path) (line \(.start.line))"' semgrep-results.json >> $GITHUB_STEP_SUMMARY 2>/dev/null || true - fi - fi - - - name: Generate SARIF - if: always() - run: | - # Convert to SARIF for GitHub Security tab - semgrep ci \ - --config=auto \ - --config=p/security-audit \ - --config=p/typescript \ - --config=p/javascript \ - --config=p/nodejs \ - --config=p/owasp-top-ten \ - --sarif \ - --output=semgrep.sarif || true - - - name: Upload SARIF - uses: github/codeql-action/upload-sarif@v3 - if: always() && hashFiles('semgrep.sarif') != '' - with: - sarif_file: semgrep.sarif - category: semgrep - - typescript-strict-checks: - name: TypeScript Security Checks - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: '1.2.14' - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: TypeScript strict null checks - run: | - # Create a strict tsconfig for security checks - cat > tsconfig.strict.json << 'EOF' - { - "extends": "./tsconfig.json", - "compilerOptions": { - "strict": true, - "noImplicitAny": true, - "strictNullChecks": true, - "strictFunctionTypes": true, - "strictBindCallApply": true, - "strictPropertyInitialization": true, - "noImplicitThis": true, - "alwaysStrict": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "noImplicitReturns": true, - "noFallthroughCasesInSwitch": true, - "noUncheckedIndexedAccess": true - } - } - EOF - - # Run strict type checking - echo "## 🔒 TypeScript Strict Security Checks" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if bunx tsc --project tsconfig.strict.json --noEmit; then - echo "✅ All TypeScript strict checks passed!" >> $GITHUB_STEP_SUMMARY - else - echo "⚠️ Some TypeScript strict checks failed. Review the output above." 
>> $GITHUB_STEP_SUMMARY - fi - - secrets-scan: - name: Secret Scanning - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Run Gitleaks - uses: gitleaks/gitleaks-action@v2 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Check for hardcoded secrets - run: | - echo "## 🔐 Secret Scanning Report" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - # Pattern checks for common secret patterns - PATTERNS=( - "password\s*=\s*[\"'][^\"']+[\"']" - "api[_-]?key\s*=\s*[\"'][^\"']+[\"']" - "token\s*=\s*[\"'][^\"']+[\"']" - "secret\s*=\s*[\"'][^\"']+[\"']" - "private[_-]?key" - ) - - FOUND_ISSUES=false - - for pattern in "${PATTERNS[@]}"; do - if grep -r -i -E "$pattern" --include="*.ts" --include="*.js" --include="*.json" --exclude-dir=node_modules --exclude-dir=dist . > /dev/null 2>&1; then - echo "⚠️ Potential secrets found matching pattern: $pattern" >> $GITHUB_STEP_SUMMARY - FOUND_ISSUES=true - fi - done - - if [ "$FOUND_ISSUES" = false ]; then - echo "✅ No hardcoded secrets detected!" >> $GITHUB_STEP_SUMMARY - fi - - dependency-review: - name: Dependency Review - runs-on: ubuntu-latest - if: github.event_name == 'pull_request' - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Dependency Review - uses: actions/dependency-review-action@v4 - with: - fail-on-severity: moderate - deny-licenses: GPL-3.0, AGPL-3.0, LGPL-3.0 - comment-summary-in-pr: always - - security-report: - name: Security Report Summary - runs-on: ubuntu-latest - needs: [dependency-audit, license-check, semgrep, typescript-strict-checks, secrets-scan] - if: always() - - steps: - - name: Generate Security Summary - run: | - echo "# 🛡️ Security Scan Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "| Check | Status |" >> $GITHUB_STEP_SUMMARY - echo "|-------|--------|" >> $GITHUB_STEP_SUMMARY - echo "| Dependency Audit | ${{ needs.dependency-audit.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| License Compliance | ${{ needs.license-check.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| Semgrep SAST | ${{ needs.semgrep.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| TypeScript Checks | ${{ needs.typescript-strict-checks.result }} |" >> $GITHUB_STEP_SUMMARY - echo "| Secrets Scan | ${{ needs.secrets-scan.result }} |" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - if [ "${{ needs.dependency-audit.result }}" != "success" ] || \ - [ "${{ needs.license-check.result }}" != "success" ] || \ - [ "${{ needs.semgrep.result }}" != "success" ] || \ - [ "${{ needs.typescript-strict-checks.result }}" != "success" ] || \ - [ "${{ needs.secrets-scan.result }}" != "success" ]; then - echo "⚠️ **Some security checks require attention**" >> $GITHUB_STEP_SUMMARY - else - echo "✅ **All security checks passed!**" >> $GITHUB_STEP_SUMMARY - fi \ No newline at end of file diff --git a/.github/workflows/test-workflow.yml b/.github/workflows/test-workflow.yml index 742b82b..485e6ef 100644 --- a/.github/workflows/test-workflow.yml +++ b/.github/workflows/test-workflow.yml @@ -9,9 +9,6 @@ on: default: false type: boolean -env: - BUN_VERSION: '1.2.14' - jobs: test: name: Test on ${{ matrix.os }} (Bun ${{ matrix.bun-version }}) @@ -19,12 +16,13 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - bun-version: ['1.2.14', 'latest'] + os: + - ${{ vars.RUNNER }} + bun-version: ['latest'] steps: - name: Checkout code - uses: actions/checkout@v4 + uses: 
actions/checkout@v6 with: fetch-depth: 0 @@ -32,10 +30,10 @@ jobs: uses: oven-sh/setup-bun@v2 with: bun-version: ${{ matrix.bun-version }} - no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues + no-cache: ${{ runner.os == 'Windows' }} # Disable cache on Windows due to issues - name: Cache dependencies - uses: actions/cache@v4 + uses: ubicloud/cache@v4 with: path: | ~/.bun/install/cache @@ -50,22 +48,17 @@ jobs: - name: Run linter run: bun run lint - if: matrix.os == 'ubuntu-latest' && matrix.bun-version == '1.2.14' - - name: Run type check run: bun run typecheck - - name: Run tests run: bun test --coverage env: LOG_EXPECTED_ERRORS: ${{ inputs.debug_enabled }} - - name: Upload coverage reports - if: matrix.os == 'ubuntu-latest' && matrix.bun-version == '1.2.14' uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} files: ./coverage/lcov.info flags: unittests name: codecov-umbrella - fail_ci_if_error: false \ No newline at end of file + fail_ci_if_error: false diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 0000000..ec552bc --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,6 @@ +{ + "MD033": false, + "MD013": false, + "MD022": false, + "MD034": false +} \ No newline at end of file diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 0000000..4b5ced8 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,49 @@ +{ + "mcpServers": { + "context7": { + "type": "sse", + "url": "https://mcp.context7.com/sse" + }, + "mcp-deepwiki": { + "type": "stdio", + "command": "npx", + "args": [ + "-y", + "mcp-deepwiki@latest" + ], + "env": {} + }, + "mcp-interactive": { + "type": "stdio", + "command": "npx", + "args": [ + "-y", + "interactive-mcp@1.9.0" + ], + "env": {} + }, + "sequential-thinking": { + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-sequential-thinking" + ] + }, + "memory": { + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-memory" + ] + }, + "mcp-docsrs": { + "type": "stdio", + "command": "./dist/mcp-docsrs", + "args": [ + "--cache-ttl=7200000", + "--max-cache-size=200", + "--db-path=./.cache" + ] + } + } +} \ No newline at end of file diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..c6ad5ad --- /dev/null +++ b/.prettierignore @@ -0,0 +1,5 @@ +.DS_Store +.licenses/ +dist/ +node_modules/ +coverage/ diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 0000000..0a60144 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1,16 @@ +# See: https://prettier.io/docs/en/configuration + +printWidth: 120 +tabWidth: 2 +useTabs: false +semi: true +singleQuote: true +quoteProps: as-needed +jsxSingleQuote: false +trailingComma: all +bracketSpacing: true +bracketSameLine: true +arrowParens: always +proseWrap: always +htmlWhitespaceSensitivity: css +endOfLine: lf diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..81d59fb --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,13 @@ +{ + "editor.defaultFormatter": "biomejs.biome", + "editor.formatOnPaste": true, + "editor.formatOnSave": true, + "biome.lsp.trace.server": "verbose", + "biome.requireConfiguration": true, + "biome.suggestInstallingGlobally": false, + "trailing-spaces.trimOnSave": true, + "typescript.tsdk": "node_modules/typescript/lib", + "[shellscript]": { + "editor.defaultFormatter": "foxundermoon.shell-format" + } +} \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..0a194ed --- /dev/null 
+++ b/AGENTS.md @@ -0,0 +1,129 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. Claude needs to ultrathink and be very precise and detailed. + +## Claude Instructions + +- You are a senior software engineer with expertise in TypeScript, the Node.js ecosystem, API integrations, and Bun. +- Always prioritize code quality, type safety, and maintainability. +- You are working on an MCP (Model Context Protocol) server that interfaces with docs.rs. +- You are using Bun as the runtime, package manager, and build tool. +- You are building a tool that helps AI assistants access Rust documentation. +- If not specified by the user, always ask before beginning a task whether you should spawn multiple subagents that work in parallel to complete it faster. +- Instruct your subagents to use their respective task plans from the plans directory, if available, to complete the tasks. +- Instead of assuming Bun commands, look up the Bun documentation: https://bun.sh/docs/cli/(run, init, add, update, publish, etc.), https://bun.sh/docs/api/(http, fetch, etc.) and https://bun.sh/docs/bundler + +## Development Process for New Features + +When developing a new feature, follow this test-driven development approach: + +1. **Plan and Research Phase** + - Create a rough plan and concept for the feature + - Think deeply about the implementation approach + - Use web search extensively to research: + - Appropriate libraries to use + - Best practices for the feature domain + - API documentation for chosen libraries + - Similar implementations for reference + - Document your findings and decisions + +2. **Test Implementation Phase** + - Write thorough unit and integration tests FIRST based on: + - The API documentation of chosen libraries + - Expected behavior of the feature + - Edge cases and error scenarios + - Ensure tests are comprehensive and cover all planned functionality + - Tests should be written to match our existing test suite patterns + +3. **Feature Implementation Phase** + - Only after tests are complete, start writing the actual implementation + - Write code to satisfy the tests you've created + - Let the tests guide your implementation + - **Never write software first and then tests later** + +This test-driven approach ensures better design, more reliable code, and easier maintenance. + +## Project Overview + +This is an MCP server implementation that provides access to Rust crate documentation via the docs.rs JSON API. The project enables AI assistants to fetch and parse Rust documentation, making it easier to provide accurate information about Rust crates, their APIs, and usage. The server includes intelligent caching and supports fetching documentation for specific crate versions and items.
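For orientation, here is a minimal sketch of that fetch flow. It is illustrative only: the endpoint shape, the zstd magic-byte check, and the `fetchCrateJson` helper are assumptions made for this sketch, not the project's actual implementation.

```typescript
import { decompress } from "fzstd"

// Hypothetical helper sketching the docs.rs rustdoc JSON fetch described above.
// Assumes docs.rs serves rustdoc JSON at /crate/<name>/<version>/json.
const fetchCrateJson = async (crateName: string, version = "latest") => {
  const res = await fetch(`https://docs.rs/crate/${crateName}/${version}/json`)
  if (!res.ok) throw new Error(`docs.rs responded with ${res.status} for ${crateName}`)
  const bytes = new Uint8Array(await res.arrayBuffer())
  // Zstd frames begin with the magic bytes 0x28 0xB5 0x2F 0xFD; decompress only then
  const isZstd = bytes[0] === 0x28 && bytes[1] === 0xb5 && bytes[2] === 0x2f && bytes[3] === 0xfd
  const text = new TextDecoder().decode(isZstd ? decompress(bytes) : bytes)
  return JSON.parse(text) // rustdoc JSON object: root, index, paths, format_version, ...
}
```

In the real server, the persistent cache (`cache.ts`) sits in front of a call like this, keyed by crate, version, and target.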
+ +## Essential Commands + +- **Format code**: `bun run lint:fix` +- **Build project**: + - `bun run build` (current platform) + - `bun run build:all` (all 7 platforms, all with bytecode for fast startup) + - `bun run build:bytecode` (standalone bytecode build, requires Bun runtime - for development only) + - Platform-specific: `bun run build:linux-x64`, `build:linux-arm64`, `build:darwin-x64`, `build:darwin-arm64`, `build:windows-x64` (all include bytecode) +- **Run tests**: `bun test` or `bun test --watch` +- **Run linter**: `bun run lint` +- **Type check**: `bun run typecheck` +- **Run specific test**: `bun test test_name` +- **Run with verbose output**: `DEBUG=mcp:* bun run src/cli.ts` +- **Clean cache**: `rm -rf ~/.mcp-docsrs/cache.db` (Adjust to the correct dbPath directory; the cache.db file is created inside the specified directory. Check for a DB_PATH environment variable or .env files, and if neither is present, ask the user to set one. Recommend ":memory:" for an in-memory cache.) +- **Check build sizes**: `bun run check:sizes` (run after `bun run build:all`) + - Use this to update the Build Output table in README.md with accurate sizes + - Follow the tip in the output: copy the generated table and replace the existing one in README.md's "Build Output" section + +## Running the Application + +- **Development**: `bun run src/cli.ts` +- **Interactive testing**: run `bun inspector` and let the user test the server and report back to you. Use this ONLY if you are sure that you need the user to test the server and you cannot test it yourself. + +## Important Guidelines + +- Ensure ALL tests pass with `bun test`, not just the ones you are working on. +- Fix IDE errors and warnings by using the IDE Diagnostics MCP +- Use descriptive variable and function names following TypeScript conventions +- Document logic with comments in the code +- Prefer Bun's built-in APIs over Node.js equivalents when available +- Handle errors gracefully with proper error types from `src/errors.ts` +- Use functional programming style with arrow functions instead of classes +- Use TypeScript types instead of interfaces + +## Configuration Files + +- **Package Manager**: Bun (see `package.json`) +- **TypeScript**: `tsconfig.json` (strict mode, ES2024 target) +- **Formatting/Linting**: `biome.json` (2-space indentation, 100 char width) +- **Cache Storage**: Default in `~/.mcp-docsrs/cache.db` file (specify a directory path; cache.db will be created automatically) +- **Build Outputs**: Executables in `dist/` directory + +## Available MCP Servers + +- mcp-interactive to ask the user questions about the task. Do not use interactive mode if the user is on macOS since it is currently broken. +- context7 to retrieve in-repository documentation files from many external repos. If it's not there, you can also ask the user to add it manually. Use it to answer questions about external dependencies first. +- If you need even more detailed information, use "mcp-deepwiki" to receive detailed descriptions generated from an entire external dependency. + +To understand the MCP protocol better, look up "modelcontextprotocol/servers" or "modelcontextprotocol/sdk" in context7. + +The implementation follows MCP server patterns for tool-based interactions, as sketched below.
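As a minimal illustration of that pattern, here is a sketch against the SDK's high-level API; the tool name, schema, and reply are placeholders, not the project's actual code:

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
import { z } from "zod"

const server = new McpServer({ name: "docs-example", version: "0.0.1" })

// A tool is a name, a Zod input schema, and an async handler returning content blocks
server.tool("lookup_crate_docs", { crateName: z.string() }, async ({ crateName }) => ({
  content: [{ type: "text", text: `Documentation summary for ${crateName} goes here` }]
}))

// Over stdio, the MCP host spawns this process and exchanges JSON-RPC on stdin/stdout
await server.connect(new StdioServerTransport())
```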
+ +### Key components + +- `tools/`: Tools for the MCP server +- `server.ts`: MCP server implementation +- `cache.ts`: LRU cache with SQLite persistence for API responses +- `docs-fetcher.ts`: HTTP client for docs.rs JSON API +- `rustdoc-parser.ts`: Parser for rustdoc JSON format +- `types.ts`: Zod schemas and TypeScript types +- `errors.ts`: Error handling and logging +- `cli.ts`: CLI entry point +- `index.ts`: Entry point for the MCP server + +The project uses the official `@modelcontextprotocol/sdk` for MCP protocol implementation. + +## Important Instruction Reminders + +- Do what has been asked; nothing more, nothing less. Do not deviate from the instructions. +- ALWAYS create a copy of the file you are editing before making changes and name it with the suffix `-new.(ts, json, etc.)`. Then ask the user if you should keep the new or the original implementation and list the changes that you made. If you are keeping the new file, delete the original file and rename the new file to the original file name, essentially removing -new from the file name. +- NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User. +- At the end of any task, make sure to lint, typecheck, run tests and finally build the code using `build:all`. +- ALWAYS check file sizes after building with `ls -lh dist/` and update the README.md Build Output table if sizes have changed. + +## Memories + +- Bun uses bun.lock now, not bun.lockb anymore +- For testing the MCP server functionality, always use the "tinc" crate at version "0.1.6" as it has rustdoc JSON available +- For testing with another library, use "clap" which also has rustdoc JSON available +- Do NOT use "serde" for testing as it doesn't have rustdoc JSON available yet diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 0000000..47dc3e3 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/LICENSE b/LICENSE index 3c3f37f..261eeb9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,201 @@ -MIT License - -Copyright (c) 2025 Cartera Mesh - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..5bdab12 --- /dev/null +++ b/README.md @@ -0,0 +1,507 @@ +# 🦀 MCP Rust Docs Server + +[![MCP Protocol](https://img.shields.io/badge/MCP-Model%20Context%20Protocol-blue?style=for-the-badge)](https://modelcontextprotocol.io) +[![Rust Docs](https://img.shields.io/badge/docs.rs-Documentation-orange?style=for-the-badge&logo=rust)](https://docs.rs) +[![Bun](https://img.shields.io/badge/Bun-1.2.14%2B-black?style=for-the-badge&logo=bun)](https://bun.sh) + +>A **Model Context Protocol** (MCP) server for **fetching Rust crate documentation** from [docs.rs](https://docs.rs) using the **rustdoc JSON API** + +[Features](#features) • [Installation](#installation) • [Usage](#usage) • [Building](#building) • [Development](#development) • [Notes](#notes) • [Contributing](#contributing) • [License](#license) + +## ✨ Features + + +- 🚀 **Fast Documentation Fetching** - Direct access to rustdoc JSON API for comprehensive crate documentation +- 🔍 **Item-Level Lookup** - Query specific structs, functions, traits, and more within crates +- 💾 **Smart Caching** - Built-in LRU cache with SQLite backend for optimal performance +- 🎯 **Version Support** - Fetch docs for specific versions or use semver ranges +- 🖥️ **Cross-Platform** - Standalone executables for Linux, macOS, and Windows +- 📦 **Zero Dependencies** - Single executable with everything bundled +- 🔧 **TypeScript** - Full type safety with modern ES modules +- 🗜️ **Compression Support** - Automatic Zstd decompression for efficient data transfer + +## 📦 Installation + + +### Using Bun + +```bash +bun install +bun run build:bytecode # or bun run build:all for all platforms +``` + +### Using Pre-built Executables + +Download the latest release for your platform from the [Releases](https://github.com/cateramesh/mcp-docsrs/releases) page: + +#### Linux + +- **x64/AMD64 (GLIBC)**: `mcp-docsrs-linux-x64` - For Ubuntu, Debian, Fedora, etc. +- **ARM64 (GLIBC)**: `mcp-docsrs-linux-arm64` - For ARM64 systems, AWS Graviton + +#### macOS + +- **Intel**: `mcp-docsrs-darwin-x64` - For Intel-based Macs +- **Apple Silicon**: `mcp-docsrs-darwin-arm64` - For M1/M2/M3 Macs + +#### Windows + +- **x64**: `mcp-docsrs-windows-x64.exe` - For 64-bit Windows + +### Using Docker + +Pull and run the latest multi-arch image (supports both x64 and ARM64): + +```bash +# Pull the latest image +docker pull ghcr.io/cateramesh/mcp-docsrs:latest + +# Run the server +docker run --rm -i ghcr.io/cateramesh/mcp-docsrs:latest + +# Run with custom configuration +docker run --rm -i ghcr.io/cateramesh/mcp-docsrs:latest \ + --cache-ttl 7200000 --max-cache-size 200 +``` + +Available tags: + +- `latest` - Latest stable release (multi-arch) +- `v1.0.0` - Specific version (multi-arch) +- `x64` - Latest x64/AMD64 build +- `arm64` - Latest ARM64 build + +## 🚀 Usage + + +### Starting the Server + +#### Using npm or Bun + +```bash +# Production mode +npm start +# or +bun start + +# Development mode with hot reload +npm run dev +# or +bun run dev +``` + +#### Using Executable + +```bash +# Show help +mcp-docsrs --help + +# Run with default settings +mcp-docsrs + +# Run with custom configuration +mcp-docsrs --cache-ttl 7200000 --max-cache-size 200 +``` + +### 🛠️ Available Tools + +#### `lookup_crate_docs` + +Fetches comprehensive documentation for an entire Rust crate. 
+ +**Parameters:** + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `crateName` | string | ✅ | Name of the Rust crate | +| `version` | string | ❌ | Specific version or semver range (e.g., "1.0.0", "~4") | +| `target` | string | ❌ | Target platform (e.g., "i686-pc-windows-msvc") | +| `formatVersion` | string | ❌ | Rustdoc JSON format version | + +**Example:** + +```json +{ + "tool": "lookup_crate_docs", + "arguments": { + "crateName": "serde", + "version": "latest" + } +} +``` + +#### `lookup_item_docs` + +Fetches documentation for a specific item within a crate. + +**Parameters:** + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `crateName` | string | ✅ | Name of the Rust crate | +| `itemPath` | string | ✅ | Path to the item (e.g., "struct.MyStruct", "fn.my_function") | +| `version` | string | ❌ | Specific version or semver range | +| `target` | string | ❌ | Target platform | + +**Example:** + +```json +{ + "tool": "lookup_item_docs", + "arguments": { + "crateName": "tokio", + "itemPath": "runtime.Runtime" + } +} +``` + +#### `search_crates` + +Search for Rust crates on crates.io with fuzzy/partial name matching. + +**Parameters:** + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `query` | string | ✅ | Search query for crate names (supports partial matches) | +| `limit` | number | ❌ | Maximum number of results to return (default: 10) | + +**Example:** + +```json +{ + "tool": "search_crates", + "arguments": { + "query": "serde", + "limit": 5 + } +} +``` + +### 📊 Resources + +The server provides resources for querying and inspecting the cache database: + +#### `cache://stats` + +Returns cache statistics including total entries, size, and oldest entry. + +**Example:** + +```json +{ + "totalEntries": 42, + "totalSize": 1048576, + "oldestEntry": "2024-01-15T10:30:00.000Z" +} +``` + +#### `cache://entries?limit={limit}&offset={offset}` + +Lists cached entries with metadata. Supports pagination. + +**Parameters:** + +- `limit` - Number of entries to return (default: 100) +- `offset` - Number of entries to skip (default: 0) + +**Example:** + +```json +[ + { + "key": "serde/latest/x86_64-unknown-linux-gnu", + "timestamp": "2024-01-15T14:20:00.000Z", + "ttl": 3600000, + "expiresAt": "2024-01-15T15:20:00.000Z", + "size": 524288 + } +] +``` + +#### `cache://query?sql={sql}` + +Execute SQL queries on the cache database (SELECT queries only for safety). + +**Example:** + +```sql +cache://query?sql=SELECT key, timestamp FROM cache WHERE key LIKE '%tokio%' ORDER BY timestamp DESC +``` + +**Note:** SQL queries in the URI should be URL-encoded. The server will automatically decode them. + +#### `cache://config` + +Returns the current server configuration including all runtime parameters. 
+ +**Example response:** + +```json +{ + "cacheTtl": 7200000, + "maxCacheSize": 200, + "requestTimeout": 30000, + "dbPath": "/Users/vexx/Repos/mcp-docsrs/.cache" +} +``` + +### ⚙️ Configuration + +Configure the server using environment variables or command-line arguments: + +| Variable | CLI Flag | Default | Description | +|----------|----------|---------|-------------| +| `CACHE_TTL` | `--cache-ttl` | 3600000 | Cache time-to-live in milliseconds | +| `MAX_CACHE_SIZE` | `--max-cache-size` | 100 | Maximum number of cached entries | +| `REQUEST_TIMEOUT` | `--request-timeout` | 30000 | HTTP request timeout in milliseconds | +| `DB_PATH` | `--db-path` | :memory: | Path to SQLite database file (use `:memory:` for in-memory) | + +**Example:** + +```bash +# Environment variables +CACHE_TTL=7200000 MAX_CACHE_SIZE=200 npm start + +# Command-line arguments (executable) +./mcp-docsrs --cache-ttl 7200000 --max-cache-size 200 + +# Use persistent database to cache documentation between sessions +./mcp-docsrs --db-path ~/.mcp-docsrs + +# Or with environment variable +DB_PATH=~/.mcp-docsrs npm start +``` + +### 🔌 MCP Configuration + +Add to your MCP configuration file: + +```json +{ + "mcpServers": { + "rust-docs": { + "command": "node", + "args": ["/path/to/mcp-docsrs/dist/index.js"] + } + } +} +``` + +Or using the executable: + +```json +{ + "mcpServers": { + "rust-docs": { + "command": "/path/to/mcp-docsrs" + } + } +} +``` + +Or using Docker: + +```json +{ + "mcpServers": { + "rust-docs": { + "command": "docker", + "args": ["run", "--rm", "-i", "ghcr.io/cateramesh/mcp-docsrs:latest"] + } + } +} +``` + + + +## 🏗️ Building + + +### Prerequisites + +- Bun v1.2.14 or later +- macOS, Linux, or Windows + +### Build Commands + +```bash +# Build for current platform +bun run build + +# Build with bytecode compilation (standalone, requires Bun runtime) +bun run build:bytecode + +# Build for all platforms (7 targets, all with bytecode for fast startup) +bun run build:all + +# Linux builds (GLIBC - standard) +bun run build:linux-x64 # Linux x64/AMD64 +bun run build:linux-arm64 # Linux ARM64 + +# macOS builds +bun run build:darwin-x64 # macOS Intel +bun run build:darwin-arm64 # macOS Apple Silicon + +# Windows build +bun run build:windows-x64 # Windows x64 +``` + +### Build Output + +All executables are created in the `dist/` directory with bytecode compilation for fast startup: + +| File | Platform | Type | Size | +|------|----------|------|------| +| `mcp-docsrs-linux-x64` | Linux x64/AMD64 | GLIBC + Bytecode | 99MB | +| `mcp-docsrs-linux-arm64` | Linux ARM64 | GLIBC + Bytecode | 93MB | +| `mcp-docsrs-darwin-x64` | macOS Intel | Bytecode | 64MB | +| `mcp-docsrs-darwin-arm64` | macOS Apple Silicon | Bytecode | 58MB | +| `mcp-docsrs-windows-x64.exe` | Windows x64 | Bytecode | 113MB | + + + +## 👨‍💻 Development + + +### Development Workflow + +```bash +# Install dependencies +bun install + +# Run in development mode +bun run dev + +# Run tests +bun test + +# Lint code +bun run lint + +# Type checking +bun run typecheck + +# Check build sizes (updates README table) +bun run check:sizes # Run after building +``` + +### Testing + +The project includes comprehensive tests for all major components: + +```bash +# Run all tests +bun test + +# Run tests in watch mode +bun test --watch + +# Run specific test file +bun test cache.test.ts + +# Run tests with full error logging (including expected errors) +LOG_EXPECTED_ERRORS=true bun test +``` + +#### Test Output + +Tests are configured to provide clean output by default: + +- 
✅ Expected errors (like `CrateNotFoundError` in 404 tests) show as green checkmarks: `✓ Expected CrateNotFoundError thrown` +- ❌ Unexpected errors are shown with full stack traces in red +- ℹ️ Info logs are shown to track test execution + +This makes it easy to distinguish between: + +- Tests that verify error handling (expected errors) +- Actual test failures (unexpected errors) + +To see full error details for debugging, set `LOG_EXPECTED_ERRORS=true`. + +### Project Structure + +```text +mcp-docsrs/ +├── src/ # Source code +│ ├── cli.ts # CLI entry point with argument parsing +│ ├── index.ts # MCP server entry point +│ ├── server.ts # MCP server implementation with tool/resource handlers +│ ├── cache.ts # LRU cache with SQLite persistence +│ ├── docs-fetcher.ts # HTTP client for docs.rs JSON API +│ ├── rustdoc-parser.ts # Parser for rustdoc JSON format +│ ├── errors.ts # Custom error types and error handling +│ ├── types.ts # TypeScript types and Zod schemas +│ └── tools/ # MCP tool implementations +│ ├── index.ts # Tool exports and registration +│ ├── lookup-crate.ts # Fetch complete crate documentation +│ ├── lookup-item.ts # Fetch specific item documentation +│ └── search-crates.ts # Search crates on crates.io +├── test/ # Test files +│ ├── cache.test.ts # Cache functionality tests +│ ├── cache-status.test.ts # Cache status and metrics tests +│ ├── docs-fetcher.test.ts # API client tests +│ ├── integration.test.ts # End-to-end integration tests +│ ├── persistent-cache.test.ts # SQLite cache persistence tests +│ ├── rustdoc-parser.test.ts # JSON parser tests +│ └── search-crates.test.ts # Crate search tests +├── scripts/ # Development and testing scripts +│ ├── test-crates-search.ts # Manual crate search testing +│ ├── test-mcp.ts # MCP server testing +│ ├── test-persistent-cache.ts # Cache persistence testing +│ ├── test-resources.ts # Resource endpoint testing +│ └── test-zstd.ts # Zstandard compression testing +├── plans/ # Project planning documents +│ └── feature-recommendations.md # Future feature ideas +├── dist/ # Build output (platform executables) +├── .github/ # GitHub Actions workflows +│ ├── workflows/ # CI/CD pipeline definitions +│ └── ... # Various automation configs +├── CLAUDE.md # AI assistant instructions +├── README.md # Project documentation +├── LICENSE # Apache 2.0 license +├── package.json # Project dependencies and scripts +├── tsconfig.json # TypeScript configuration +├── biome.json # Code formatter/linter config +└── bun.lock # Bun package lock file +``` + + + +## 📝 Notes + + +- 📅 The rustdoc JSON feature on docs.rs started on **2025-05-23**, so releases before that date won't have JSON available +- 🔄 The server automatically handles redirects and format version compatibility +- ⚡ Cached responses significantly improve performance for repeated lookups +- 📦 Built executables include all dependencies - no runtime installation required + +## 🤝 Contributing + + +Contributions are welcome! Please feel free to submit a Pull Request. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/amazing-feature`) +3. Commit your changes (`git commit -m 'Add some amazing feature'`) +4. Push to the branch (`git push origin feature/amazing-feature`) +5. 
Open a Pull Request + + + +## 🙏 Acknowledgments + +- [docs.rs](https://docs.rs) for providing the Rust documentation API +- [Model Context Protocol](https://github.com/modelcontextprotocol) for the MCP specification +- The Rust community for excellent documentation standards + +## 📄 License + + +This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details. + +--- + +Made with ❤️ for the Rust community + +[Report Bug](https://github.com/your-repo/issues) • [Request Feature](https://github.com/your-repo/issues) diff --git a/biome.json b/biome.json new file mode 100644 index 0000000..14a4d37 --- /dev/null +++ b/biome.json @@ -0,0 +1,105 @@ +{ + "$schema": "https://biomejs.dev/schemas/2.0.0-beta.6/schema.json", + "vcs": { + "enabled": true, + "clientKind": "git", + "useIgnoreFile": true + }, + "files": { + "ignoreUnknown": false, + "includes": [ + "**", + "!**/dist/**", + "!**/src/varint.ts", + "!**/src/utf8.ts", + "!**/src/types.ts", + "!**/src/helpers.ts", + "!**/src/extern.ts", + "!**/src/decimals.ts", + "!**/src/binary.ts" + ] + }, + "formatter": { + "enabled": true, + "indentStyle": "space", + "indentWidth": 2 + }, + "assist": { + "actions": { + "source": { + "organizeImports": "on" + } + } + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true, + "correctness": { + "noConstantMathMinMaxClamp": "error", + "noUndeclaredVariables": "error", + "noUnusedImports": "error", + "noUnusedFunctionParameters": "error", + "noUnusedPrivateClassMembers": "error", + "useExhaustiveDependencies": { + "level": "error", + "options": { + "reportUnnecessaryDependencies": false + } + }, + "noUnusedVariables": "error" + }, + "style": { + "noParameterProperties": "error", + "noYodaExpression": "error", + "useConsistentBuiltinInstantiation": "error", + "useFragmentSyntax": "error", + "useShorthandAssign": "error", + "noNonNullAssertion": "off", + "noParameterAssign": "error", + "useAsConstAssertion": "error", + "useDefaultParameterLast": "error", + "useEnumInitializers": "error", + "useSelfClosingElements": "error", + "useSingleVarDeclarator": "error", + "noUnusedTemplateLiteral": "error", + "useNumberNamespace": "error", + "noInferrableTypes": "error", + "noUselessElse": "error", + "useArrayLiterals": "error" + }, + "suspicious": { + "useAwait": "error", + "noEvolvingTypes": "error", + "noExplicitAny": "off" + }, + "complexity": { + "noUselessStringConcat": "error", + "noUselessUndefinedInitialization": "error", + "noVoid": "error", + "useDateNow": "error", + "noBannedTypes": "off", + "noForEach": "off" + } + } + }, + "javascript": { + "globals": ["Bun"], + "formatter": { + "arrowParentheses": "always", + "quoteStyle": "double", + "bracketSameLine": false, + "semicolons": "asNeeded", + "bracketSpacing": true, + "trailingCommas": "none", + "quoteProperties": "asNeeded", + "enabled": true, + "attributePosition": "auto", + "indentWidth": 2, + "indentStyle": "space", + "jsxQuoteStyle": "double", + "lineEnding": "lf", + "lineWidth": 100 + } + } +} diff --git a/bun.lock b/bun.lock new file mode 100644 index 0000000..647c929 --- /dev/null +++ b/bun.lock @@ -0,0 +1,234 @@ +{ + "lockfileVersion": 1, + "configVersion": 0, + "workspaces": { + "": { + "name": "mcp-rust-docs", + "dependencies": { + "@modelcontextprotocol/sdk": "latest", + "fzstd": "latest", + "zod": "latest", + }, + "devDependencies": { + "@biomejs/biome": "latest", + "@types/bun": "latest", + "typescript": "latest", + }, + }, + }, + "packages": { + "@biomejs/biome": ["@biomejs/biome@2.3.10", "", { 
"optionalDependencies": { "@biomejs/cli-darwin-arm64": "2.3.10", "@biomejs/cli-darwin-x64": "2.3.10", "@biomejs/cli-linux-arm64": "2.3.10", "@biomejs/cli-linux-arm64-musl": "2.3.10", "@biomejs/cli-linux-x64": "2.3.10", "@biomejs/cli-linux-x64-musl": "2.3.10", "@biomejs/cli-win32-arm64": "2.3.10", "@biomejs/cli-win32-x64": "2.3.10" }, "bin": { "biome": "bin/biome" } }, "sha512-/uWSUd1MHX2fjqNLHNL6zLYWBbrJeG412/8H7ESuK8ewoRoMPUgHDebqKrPTx/5n6f17Xzqc9hdg3MEqA5hXnQ=="], + + "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@2.3.10", "", { "os": "darwin", "cpu": "arm64" }, "sha512-M6xUjtCVnNGFfK7HMNKa593nb7fwNm43fq1Mt71kpLpb+4mE7odO8W/oWVDyBVO4ackhresy1ZYO7OJcVo/B7w=="], + + "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@2.3.10", "", { "os": "darwin", "cpu": "x64" }, "sha512-Vae7+V6t/Avr8tVbFNjnFSTKZogZHFYl7MMH62P/J1kZtr0tyRQ9Fe0onjqjS2Ek9lmNLmZc/VR5uSekh+p1fg=="], + + "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@2.3.10", "", { "os": "linux", "cpu": "arm64" }, "sha512-hhPw2V3/EpHKsileVOFynuWiKRgFEV48cLe0eA+G2wO4SzlwEhLEB9LhlSrVeu2mtSn205W283LkX7Fh48CaxA=="], + + "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@2.3.10", "", { "os": "linux", "cpu": "arm64" }, "sha512-B9DszIHkuKtOH2IFeeVkQmSMVUjss9KtHaNXquYYWCjH8IstNgXgx5B0aSBQNr6mn4RcKKRQZXn9Zu1rM3O0/A=="], + + "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@2.3.10", "", { "os": "linux", "cpu": "x64" }, "sha512-wwAkWD1MR95u+J4LkWP74/vGz+tRrIQvr8kfMMJY8KOQ8+HMVleREOcPYsQX82S7uueco60L58Wc6M1I9WA9Dw=="], + + "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@2.3.10", "", { "os": "linux", "cpu": "x64" }, "sha512-QTfHZQh62SDFdYc2nfmZFuTm5yYb4eO1zwfB+90YxUumRCR171tS1GoTX5OD0wrv4UsziMPmrePMtkTnNyYG3g=="], + + "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@2.3.10", "", { "os": "win32", "cpu": "arm64" }, "sha512-o7lYc9n+CfRbHvkjPhm8s9FgbKdYZu5HCcGVMItLjz93EhgJ8AM44W+QckDqLA9MKDNFrR8nPbO4b73VC5kGGQ=="], + + "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@2.3.10", "", { "os": "win32", "cpu": "x64" }, "sha512-pHEFgq7dUEsKnqG9mx9bXihxGI49X+ar+UBrEIj3Wqj3UCZp1rNgV+OoyjFgcXsjCWpuEAF4VJdkZr3TrWdCbQ=="], + + "@hono/node-server": ["@hono/node-server@1.19.7", "", { "peerDependencies": { "hono": "^4" } }, "sha512-vUcD0uauS7EU2caukW8z5lJKtoGMokxNbJtBiwHgpqxEXokaHCBkQUmCHhjFB1VUTWdqj25QoMkMKzgjq+uhrw=="], + + "@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.25.1", "", { "dependencies": { "@hono/node-server": "^1.19.7", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "jose": "^6.1.1", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", "zod-to-json-schema": "^3.25.0" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-yO28oVFFC7EBoiKdAn+VqRm+plcfv4v0xp6osG/VsCB0NlPZWi87ajbCZZ8f/RvOFLEu7//rSRmuZZ7lMoe3gQ=="], + + "@types/bun": ["@types/bun@1.3.5", "", { "dependencies": { "bun-types": "1.3.5" } }, "sha512-RnygCqNrd3srIPEWBd5LFeUYG7plCoH2Yw9WaZGyNmdTEei+gWaHqydbaIRkIkcbXwhBT94q78QljxN0Sk838w=="], + + "@types/node": ["@types/node@24.0.3", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-R4I/kzCYAdRLzfiCabn9hxWfbuHS573x+r0dJMkkzThEa7pbrcDWK+9zu3e7aBOouf+rQAciqPFMnxwr0aWgKg=="], + + "accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, 
"sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="], + + "ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], + + "ajv-formats": ["ajv-formats@3.0.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="], + + "body-parser": ["body-parser@2.2.0", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.0", "http-errors": "^2.0.0", "iconv-lite": "^0.6.3", "on-finished": "^2.4.1", "qs": "^6.14.0", "raw-body": "^3.0.0", "type-is": "^2.0.0" } }, "sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg=="], + + "bun-types": ["bun-types@1.3.5", "", { "dependencies": { "@types/node": "*" } }, "sha512-inmAYe2PFLs0SUbFOWSVD24sg1jFlMPxOjOSSCYqUgn4Hsc3rDc7dFvfVYjFPNHtov6kgUeulV4SxbuIV/stPw=="], + + "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], + + "call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], + + "call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="], + + "content-disposition": ["content-disposition@1.0.0", "", { "dependencies": { "safe-buffer": "5.2.1" } }, "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg=="], + + "content-type": ["content-type@1.0.5", "", {}, "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="], + + "cookie": ["cookie@0.7.2", "", {}, "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w=="], + + "cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="], + + "cors": ["cors@2.8.5", "", { "dependencies": { "object-assign": "^4", "vary": "^1" } }, "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g=="], + + "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], + + "debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="], + + "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], + + "ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="], + + "encodeurl": ["encodeurl@2.0.0", "", {}, 
"sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], + + "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], + + "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], + + "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], + + "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], + + "etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="], + + "eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="], + + "eventsource-parser": ["eventsource-parser@3.0.2", "", {}, "sha512-6RxOBZ/cYgd8usLwsEl+EC09Au/9BcmCKYF2/xbml6DNczf7nv0MQb+7BA2F+li6//I+28VNlQR37XfQtcAJuA=="], + + "express": ["express@5.1.0", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.0", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA=="], + + "express-rate-limit": ["express-rate-limit@7.5.0", "", { "peerDependencies": { "express": "^4.11 || 5 || ^5.0.0-beta.1" } }, "sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg=="], + + "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], + + "fast-uri": ["fast-uri@3.1.0", "", {}, "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="], + + "finalhandler": ["finalhandler@2.1.0", "", { "dependencies": { "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "on-finished": "^2.4.1", "parseurl": "^1.3.3", "statuses": "^2.0.1" } }, "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q=="], + + "forwarded": ["forwarded@0.2.0", "", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="], + + "fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="], + + "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], + + "fzstd": ["fzstd@0.1.1", "", {}, "sha512-dkuVSOKKwh3eas5VkJy1AW1vFpet8TA/fGmVA5krThl8YcOVE/8ZIoEA1+U1vEn5ckxxhLirSdY837azmbaNHA=="], + + "get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", 
"es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="], + + "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], + + "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], + + "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], + + "hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="], + + "hono": ["hono@4.11.1", "", {}, "sha512-KsFcH0xxHes0J4zaQgWbYwmz3UPOOskdqZmItstUG93+Wk1ePBLkLGwbP9zlmh1BFUiL8Qp+Xfu9P7feJWpGNg=="], + + "http-errors": ["http-errors@2.0.0", "", { "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" } }, "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ=="], + + "iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="], + + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + + "ipaddr.js": ["ipaddr.js@1.9.1", "", {}, "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="], + + "is-promise": ["is-promise@4.0.0", "", {}, "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ=="], + + "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "jose": ["jose@6.1.3", "", {}, "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ=="], + + "json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], + + "json-schema-typed": ["json-schema-typed@8.0.2", "", {}, "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA=="], + + "math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], + + "media-typer": ["media-typer@1.1.0", "", {}, "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw=="], + + "merge-descriptors": ["merge-descriptors@2.0.0", "", {}, "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g=="], + + "mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], + + "mime-types": ["mime-types@3.0.1", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA=="], + + "ms": ["ms@2.1.3", "", {}, 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="], + + "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], + + "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], + + "on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="], + + "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], + + "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], + + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], + + "path-to-regexp": ["path-to-regexp@8.2.0", "", {}, "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ=="], + + "pkce-challenge": ["pkce-challenge@5.0.0", "", {}, "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ=="], + + "proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="], + + "qs": ["qs@6.14.0", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w=="], + + "range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="], + + "raw-body": ["raw-body@3.0.0", "", { "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", "iconv-lite": "0.6.3", "unpipe": "1.0.0" } }, "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g=="], + + "require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="], + + "router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="], + + "safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="], + + "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], + + "send": ["send@1.2.0", "", { "dependencies": { "debug": "^4.3.5", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.0", "mime-types": "^3.0.1", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.1" } }, "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw=="], + + "serve-static": ["serve-static@2.2.0", "", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", 
"parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ=="], + + "setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="], + + "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], + + "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], + + "side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="], + + "side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="], + + "side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="], + + "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], + + "statuses": ["statuses@2.0.2", "", {}, "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw=="], + + "toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="], + + "type-is": ["type-is@2.0.1", "", { "dependencies": { "content-type": "^1.0.5", "media-typer": "^1.1.0", "mime-types": "^3.0.0" } }, "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw=="], + + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="], + + "unpipe": ["unpipe@1.0.0", "", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="], + + "vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="], + + "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], + + "zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], + + "zod-to-json-schema": ["zod-to-json-schema@3.25.0", "", { "peerDependencies": { "zod": 
"^3.25 || ^4" } }, "sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ=="], + + "http-errors/statuses": ["statuses@2.0.1", "", {}, "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="], + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..8350e77 --- /dev/null +++ b/package.json @@ -0,0 +1,56 @@ +{ + "name": "mcp-docsrs", + "version": "1.0.0", + "description": "MCP server for fetching Rust crate documentation via docs.rs JSON API", + "type": "module", + "main": "./dist/mcp-docsrs", + "bin": { + "mcp-docsrs": "./src/index.ts" + }, + "scripts": { + "start": "./dist/mcp-docsrs", + "dev": "bun run src/index.ts", + "build": "bun build ./src/index.ts --compile --minify --sourcemap --outfile dist/mcp-docsrs", + "build:bytecode": "bun build ./src/index.ts --compile --minify --sourcemap --bytecode --outfile dist/mcp-docsrs", + "build:all": "bun run build:linux-x64 && bun run build:linux-arm64 && bun run build:darwin-arm64 && bun run build:windows-x64", + "build:linux-x64": "bun build ./src/index.ts --compile --minify --bytecode --target=bun-linux-x64 --outfile dist/mcp-docsrs-linux-x64", + "build:linux-arm64": "bun build ./src/index.ts --compile --minify --bytecode --target=bun-linux-arm64 --outfile dist/mcp-docsrs-linux-arm64", + "build:darwin-arm64": "bun build ./src/index.ts --compile --minify --bytecode --target=bun-darwin-arm64 --outfile dist/mcp-docsrs-darwin-arm64", + "build:windows-x64": "bun build ./src/index.ts --compile --minify --bytecode --target=bun-windows-x64 --outfile dist/mcp-docsrs-windows-x64.exe", + "typecheck": "tsc --noEmit", + "inspector": "DANGEROUSLY_OMIT_AUTH=true bunx @modelcontextprotocol/inspector -- ./dist/mcp-docsrs --cache-ttl=7200000 --max-cache-size=200 --db-path=/Users/vexx/Repos/mcp-docsrs/.cache", + "lint": "biome check ./src && biome check ./test", + "lint:fix": "biome check ./src --fix && biome check ./test --fix", + "lint:fixunsafe": "biome check ./src --fix --unsafe && biome check ./test --fix --unsafe", + "test": "BUN_ENV=test bun test test/unit", + "test:unit": "BUN_ENV=test bun test test/unit", + "test:integration": "BUN_ENV=test bun test test/integration/api test/integration/persistence test/integration/e2e", + "test:integration:api": "BUN_ENV=test bun test test/integration/api", + "test:integration:binary": "BUN_ENV=test bun test test/integration/binary/runner.test.ts", + "test:all": "bun run test:unit && bun run test:integration", + "test:watch": "BUN_ENV=test bun test test/unit --watch", + "check:sizes": "bun run scripts/check-build-sizes.ts" + }, + "keywords": [ + "mcp", + "rust", + "docs", + "documentation", + "bun" + ], + "author": "Marius Modlich ", + "license": "Apache-2.0", + "dependencies": { + "@modelcontextprotocol/sdk": "1.25.1", + "fzstd": "0.1.1", + "zod": "4.2.1" + }, + "devDependencies": { + "@biomejs/biome": "2.3.10", + "@types/bun": "1.3.5", + "typescript": "5.9.3" + }, + "engines": { + "bun": ">=1.2.14" + } +} diff --git a/src/cache.ts b/src/cache.ts new file mode 100644 index 0000000..d6a2176 --- /dev/null +++ b/src/cache.ts @@ -0,0 +1,252 @@ +import { Database } from "bun:sqlite" +import { mkdirSync } from "node:fs" +import { dirname } from "node:path" +import { CacheError, ErrorLogger } from "./errors.js" + +// Convert directory path to cache database file path +const toCacheDbPath = (dbPath: string | undefined): string | undefined => { + if (!dbPath || dbPath === ":memory:") { + return dbPath + } + // If it 
already ends with .db, assume it's a full file path + if (dbPath.endsWith(".db")) { + return dbPath + } + // Otherwise, treat it as a directory and append cache.db + return dbPath.endsWith("/") ? `${dbPath}cache.db` : `${dbPath}/cache.db` +} + +// Create or get cache database +const createCacheDb = (dbPath = ":memory:") => { + try { + // Convert directory path to cache.db file path + const normalizedPath = toCacheDbPath(dbPath) || dbPath + + // Create directory if using file-based database + if (normalizedPath !== ":memory:") { + const dir = dirname(normalizedPath) + mkdirSync(dir, { recursive: true }) + } + + const db = new Database(normalizedPath) + + // Create cache table if it doesn't exist + db.run(` + CREATE TABLE IF NOT EXISTS cache ( + key TEXT PRIMARY KEY, + data TEXT NOT NULL, + timestamp INTEGER NOT NULL, + ttl INTEGER NOT NULL + ) + `) + + // Create index for faster lookups + db.run("CREATE INDEX IF NOT EXISTS idx_cache_timestamp ON cache(timestamp)") + + return db + } catch (error) { + throw new CacheError("set", `Failed to create cache database: ${(error as Error).message}`) + } +} + +// Create a cache with SQLite (in-memory or file-based) +export const createCache = (maxSize = 100, dbPath?: string) => { + // Convert directory path to cache.db file path + const normalizedPath = toCacheDbPath(dbPath) + + // Log the cache path for clarity + if (normalizedPath && normalizedPath !== ":memory:") { + ErrorLogger.logInfo("Creating cache database", { path: normalizedPath }) + } + + const db = createCacheDb(normalizedPath) + + // Prepare statements for better performance + const getStmt = db.prepare("SELECT data, timestamp, ttl FROM cache WHERE key = ?") + const setStmt = db.prepare(` + INSERT OR REPLACE INTO cache (key, data, timestamp, ttl) + VALUES (?, ?, ?, ?) 
+ `) + const deleteStmt = db.prepare("DELETE FROM cache WHERE key = ?") + const clearStmt = db.prepare("DELETE FROM cache") + const countStmt = db.prepare("SELECT COUNT(*) as count FROM cache") + const oldestStmt = db.prepare("SELECT key FROM cache ORDER BY timestamp ASC LIMIT 1") + + // Clean up expired entries + const cleanupExpired = () => { + const now = Date.now() + db.run("DELETE FROM cache WHERE timestamp + ttl < ?", [now]) + } + + // Get a value from cache with metadata + const getWithMetadata = <T>(key: string): { data: T | null; isHit: boolean } => { + try { + cleanupExpired() + + const result = getStmt.get(key) as { + data: string + timestamp: number + ttl: number + } | null + + if (!result) return { data: null, isHit: false } + + // Check if entry is expired + if (Date.now() > result.timestamp + result.ttl) { + deleteStmt.run(key) + return { data: null, isHit: false } + } + + return { data: JSON.parse(result.data), isHit: true } + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("get", `Failed to get key '${key}': ${(error as Error).message}`) + } + } + + // Get a value from cache (legacy method) + const get = <T>(key: string): T | null => { + const { data } = getWithMetadata<T>(key) + return data + } + + // Set a value in cache + const set = <T>(key: string, value: T, ttl: number): void => { + try { + // Enforce max size + const count = (countStmt.get() as { count: number }).count + if (count >= maxSize) { + // Remove oldest entry + const oldest = oldestStmt.get() as { key: string } | null + if (oldest) { + deleteStmt.run(oldest.key) + } + } + + setStmt.run(key, JSON.stringify(value), Date.now(), ttl) + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("set", `Failed to set key '${key}': ${(error as Error).message}`) + } + } + + // Delete a value from cache + const remove = (key: string): void => { + try { + deleteStmt.run(key) + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("delete", `Failed to delete key '${key}': ${(error as Error).message}`) + } + } + + // Clear all cache + const clear = (): void => { + try { + clearStmt.run() + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("clear", `Failed to clear cache: ${(error as Error).message}`) + } + } + + // Execute a raw SQL query + const query = (sql: string, params: any[] = []): any[] => { + try { + cleanupExpired() + return db.prepare(sql).all(...params) + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("query", `Failed to execute query: ${(error as Error).message}`) + } + } + + // Get cache statistics + const getStats = (): { totalEntries: number; totalSize: number; oldestEntry: Date | null } => { + try { + const count = (countStmt.get() as { count: number }).count + const sizeResult = db.prepare("SELECT SUM(LENGTH(data)) as totalSize FROM cache").get() as { + totalSize: number | null + } + const oldestResult = db.prepare("SELECT MIN(timestamp) as oldest FROM cache").get() as { + oldest: number | null + } + + return { + totalEntries: count, + totalSize: sizeResult.totalSize || 0, + oldestEntry: oldestResult.oldest ?
new Date(oldestResult.oldest) : null + } + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("stats", `Failed to get cache statistics: ${(error as Error).message}`) + } + } + + // List all cache entries with metadata + const listEntries = ( + limit = 100, + offset = 0 + ): Array<{ key: string; timestamp: Date; ttl: number; expiresAt: Date; size: number }> => { + try { + cleanupExpired() + const entries = db + .prepare( + `SELECT key, timestamp, ttl, LENGTH(data) as size + FROM cache + ORDER BY timestamp DESC + LIMIT ? OFFSET ?` + ) + .all(limit, offset) as Array<{ + key: string + timestamp: number + ttl: number + size: number + }> + + return entries.map((entry) => ({ + key: entry.key, + timestamp: new Date(entry.timestamp), + ttl: entry.ttl, + expiresAt: new Date(entry.timestamp + entry.ttl), + size: entry.size + })) + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("list", `Failed to list cache entries: ${(error as Error).message}`) + } + } + + // Close the database + const close = (): void => { + try { + // Finalize all prepared statements first + getStmt.finalize() + setStmt.finalize() + deleteStmt.finalize() + clearStmt.finalize() + countStmt.finalize() + oldestStmt.finalize() + + // Then close the database connection + db.close() + } catch (error) { + ErrorLogger.log(error as Error) + throw new CacheError("close", `Failed to close cache database: ${(error as Error).message}`) + } + } + + return { + get, + getWithMetadata, + set, + delete: remove, + clear, + close, + query, + getStats, + listEntries + } +} + +export type Cache = ReturnType<typeof createCache>
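The factory above amounts to a small TTL cache on `bun:sqlite` that evicts the oldest row once `maxSize` is reached. A minimal usage sketch (illustrative only — the size, path, keys and values are made-up examples):

```ts
import { createCache } from "./cache.js"

const cache = createCache(50, "./data") // keeps at most 50 rows in ./data/cache.db

// Every entry carries its own TTL (milliseconds)
cache.set("docs:serde", { version: "1.0.219" }, 5_000)

cache.get<{ version: string }>("docs:serde") // -> { version: "1.0.219" }
cache.getWithMetadata("docs:serde") // -> { data: {...}, isHit: true }
cache.getStats() // -> { totalEntries: 1, totalSize: ..., oldestEntry: Date }

cache.close() // finalizes the prepared statements, then closes the database
```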
diff --git a/src/docs-fetcher.ts b/src/docs-fetcher.ts new file mode 100644 index 0000000..b5c3403 --- /dev/null +++ b/src/docs-fetcher.ts @@ -0,0 +1,236 @@ +import { decompress } from "fzstd" +import { createCache } from "./cache.js" +import { + CrateNotFoundError, + DecompressionError, + ErrorLogger, + JSONParseError, + NetworkError, + TimeoutError +} from "./errors.js" +import type { ServerConfig } from "./types.js" + +// Build the docs.rs JSON URL for a crate +const buildJsonUrl = ( + crateName: string, + version?: string, + target?: string, + formatVersion?: number +): string => { + let url = `https://docs.rs/crate/${crateName}` + + // Add version (latest by default) + url += `/${version || "latest"}` + + // Add target if specified + if (target) { + url += `/${target}` + } + + // Add JSON endpoint + url += "/json" + + // Add format version if specified + if (formatVersion) { + url += `/${formatVersion}` + } + + return url +} + +// Create a docs fetcher with caching +export const createDocsFetcher = (config: ServerConfig = {}) => { + const cache = createCache(config.maxCacheSize || 100, config.dbPath) + const timeout = config.requestTimeout || 30000 + const cacheTtl = config.cacheTtl || 3600000 // 1 hour default + + // Fetch rustdoc JSON for a crate with cache status + const fetchCrateJsonWithStatus = async ( + crateName: string, + version?: string, + target?: string, + formatVersion?: number + ): Promise<{ data: any; fromCache: boolean }> => { + const url = buildJsonUrl(crateName, version, target, formatVersion) + const cacheKey = url + + // Check cache first + const { data: cached } = cache.getWithMetadata(cacheKey) + if (cached) { + ErrorLogger.logInfo("Cache hit for rustdoc JSON", { url, crateName }) + return { data: cached, fromCache: true } + } + + ErrorLogger.logInfo("Fetching rustdoc JSON", { + url, + crateName, + version, + target, + formatVersion + }) + + try { + // Use Bun's native fetch with AbortController for timeout + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), timeout) + + // Note: zstd is deliberately omitted from Accept-Encoding; docs.rs serves rustdoc JSON + // zstd-compressed regardless, and we handle that decompression manually below + const response = await fetch(url, { + signal: controller.signal, + headers: { + "User-Agent": "mcp-docsrs/1.0.0", + "Accept-Encoding": "gzip, deflate, br" + } + }) + + clearTimeout(timeoutId) + + if (response.status === 404) { + throw new CrateNotFoundError(crateName, version) + } + + if (!response.ok) { + throw new NetworkError(url, response.status, response.statusText) + } + + // Check if response needs manual decompression (shouldn't be needed with Bun's auto-decompression) + const encoding = response.headers.get("content-encoding") + const contentType = response.headers.get("content-type") + let data: any + + ErrorLogger.logInfo("Response received", { + url, + status: response.status, + contentType, + encoding + }) + + if (encoding === "zstd" || encoding === "Zstd") { + try { + // docs.rs always serves rustdoc JSON with zstd compression + const buffer = await response.arrayBuffer() + ErrorLogger.logInfo("Decompressing zstd content", { + url, + bufferSize: buffer.byteLength + }) + + // Use fzstd which handles memory allocation better than other libraries + // fzstd reads frame headers to determine memory requirements + const decompressed = decompress(new Uint8Array(buffer)) + const jsonText = new TextDecoder().decode(decompressed) + + ErrorLogger.logInfo("Decompression successful", { + url, + decompressedSize: jsonText.length + }) + + try { + data = JSON.parse(jsonText) + } catch (parseError) { + throw new JSONParseError(jsonText, parseError as Error, url) + } + } catch (error) { + if (error instanceof JSONParseError) { + throw error + } + throw new DecompressionError(url, "zstd", (error as Error).message) + } + } else { + // Normal JSON response (Bun handles decompression automatically) + try { + // First get the response as text to have better error reporting + const responseText = await response.text() + + if (!responseText || responseText.trim().length === 0) { + throw new JSONParseError("", new Error("Empty response body"), url) + } + + try { + data = JSON.parse(responseText) + } catch (parseError) { + throw new JSONParseError(responseText, parseError as Error, url) + } + } catch (error) { + if (error instanceof JSONParseError) { + throw error + } + throw new Error(`Failed to read response body: ${(error as Error).message}`) + } + } + + // Validate that we have the expected rustdoc structure + if (!data || typeof data !== "object") { + throw new JSONParseError( + JSON.stringify(data), + new Error("Response is not a valid object"), + url + ) + } + + // Cache the successful response + cache.set(cacheKey, data, cacheTtl) + ErrorLogger.logInfo("Successfully cached rustdoc JSON", { url, cacheKey }) + + return { data, fromCache: false } + } catch (error) { + ErrorLogger.log(error as Error) + + if (error instanceof Error) { + if (error.name === "AbortError") { + throw new TimeoutError(url, timeout) + } + // Re-throw our custom errors + if ( + error instanceof JSONParseError || + error instanceof NetworkError || + error instanceof CrateNotFoundError || + error instanceof DecompressionError || + error instanceof TimeoutError + ) { + throw error + } + } + // Wrap unknown errors + throw new NetworkError(url, undefined, undefined, (error as Error).message) + } + } + + // Clear the cache + const clearCache = (): void => { + cache.clear() + } + + // Close the cache database + const close = (): void => { + cache.close() + } + + // Get cache statistics + const getCacheStats = () => { + return cache.getStats() + } + + // Get cache entries + const getCacheEntries = (limit: number, offset: number) => { + return cache.listEntries(limit, offset) + } + + // Query cache database + const queryCacheDb = (sql: string) => { + // Only allow SELECT queries for safety + if (!sql || !sql.trim().toUpperCase().startsWith("SELECT")) { + throw new Error("Only SELECT queries are allowed for safety") + } + return cache.query(sql) + } + + return { + fetchCrateJson: fetchCrateJsonWithStatus, + clearCache, + close, + getCacheStats, + getCacheEntries, + queryCacheDb + } +}
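End to end, the fetcher resolves a docs.rs URL via `buildJsonUrl`, decompresses the zstd payload with `fzstd`, and memoizes the parsed JSON in the SQLite cache. A usage sketch (illustrative; the crate name and settings are arbitrary):

```ts
import { createDocsFetcher } from "./docs-fetcher.js"

const fetcher = createDocsFetcher({ cacheTtl: 60_000, requestTimeout: 10_000 })

// First call fetches https://docs.rs/crate/serde/latest/json and caches the parsed JSON...
const first = await fetcher.fetchCrateJson("serde")
console.log(first.fromCache) // false

// ...a repeat call within the TTL is served from SQLite without touching the network.
const second = await fetcher.fetchCrateJson("serde")
console.log(second.fromCache) // true

fetcher.close()
```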
diff --git a/src/errors.ts b/src/errors.ts new file mode 100644 index 0000000..cce4481 --- /dev/null +++ b/src/errors.ts @@ -0,0 +1,342 @@ +/** + * Custom error classes for the mcp-docsrs MCP server + * All errors are fully typed with detailed context information + */ + +/** + * Base error class for all mcp-docsrs errors + */ +export abstract class MCPDocsRsError extends Error { + readonly timestamp: Date + readonly context?: Record<string, unknown> + + constructor(message: string, context?: Record<string, unknown>) { + super(message) + this.name = this.constructor.name + this.timestamp = new Date() + this.context = context + Error.captureStackTrace(this, this.constructor) + } + + toJSON() { + return { + name: this.name, + message: this.message, + timestamp: this.timestamp, + context: this.context, + stack: this.stack + } + } +} + +/** + * Error thrown when JSON parsing fails + */ +export class JSONParseError extends MCPDocsRsError { + readonly rawData: string + readonly parseError: Error + + constructor(rawData: string, parseError: Error, url?: string) { + const preview = rawData.length > 200 ? `${rawData.substring(0, 200)}...` : rawData + const message = `Failed to parse JSON: ${parseError.message}` + + super(message, { + url, + dataLength: rawData.length, + dataPreview: preview, + contentType: typeof rawData, + parseErrorName: parseError.name + }) + + this.rawData = rawData + this.parseError = parseError + } +} + +/** + * Error thrown when network requests fail + */ +export class NetworkError extends MCPDocsRsError { + readonly statusCode?: number + readonly statusText?: string + readonly url: string + + constructor(url: string, statusCode?: number, statusText?: string, details?: string) { + const message = statusCode + ? `Network request failed: HTTP ${statusCode} ${statusText || ""}${details ? ` - ${details}` : ""}` + : `Network request failed: ${details || "Unknown error"}` + + super(message, { + url, + statusCode, + statusText, + details + }) + + this.url = url + this.statusCode = statusCode + this.statusText = statusText + } +} + +/** + * Error thrown when a crate is not found + */ +export class CrateNotFoundError extends MCPDocsRsError { + readonly crateName: string + readonly version?: string + + constructor(crateName: string, version?: string, details?: string) { + const versionStr = version ? ` version ${version}` : "" + const message = `Crate '${crateName}'${versionStr} not found. ${details ||
"Note: docs.rs started building rustdoc JSON on 2023-05-23, so older releases may not have JSON available yet."}` + + super(message, { + crateName, + version, + details + }) + + this.crateName = crateName + this.version = version + } +} + +/** + * Error thrown when rustdoc JSON is not available + */ +export class RustdocNotAvailableError extends MCPDocsRsError { + readonly crateName: string + readonly version?: string + readonly reason?: string + + constructor(crateName: string, version?: string, reason?: string) { + const versionStr = version ? ` version ${version}` : "" + const message = `Rustdoc JSON not available for crate '${crateName}'${versionStr}. ${reason || "The crate may not have been built with rustdoc JSON support."}` + + super(message, { + crateName, + version, + reason + }) + + this.crateName = crateName + this.version = version + this.reason = reason + } +} + +/** + * Error thrown when request times out + */ +export class TimeoutError extends MCPDocsRsError { + readonly url: string + readonly timeoutMs: number + + constructor(url: string, timeoutMs: number) { + const message = `Request timeout after ${timeoutMs}ms` + + super(message, { + url, + timeoutMs + }) + + this.url = url + this.timeoutMs = timeoutMs + } +} + +/** + * Error thrown when decompression fails + */ +export class DecompressionError extends MCPDocsRsError { + readonly encoding: string + readonly url: string + + constructor(url: string, encoding: string, details?: string) { + const message = `Failed to decompress ${encoding} content: ${details || "Unknown error"}` + + super(message, { + url, + encoding, + details + }) + + this.url = url + this.encoding = encoding + } +} + +/** + * Error thrown when cache operations fail + */ +export class CacheError extends MCPDocsRsError { + readonly operation: "get" | "set" | "delete" | "clear" | "close" | "query" | "stats" | "list" + + constructor( + operation: "get" | "set" | "delete" | "clear" | "close" | "query" | "stats" | "list", + details?: string + ) { + const message = `Cache operation '${operation}' failed: ${details || "Unknown error"}` + + super(message, { + operation, + details + }) + + this.operation = operation + } +} + +/** + * Error thrown when parsing rustdoc data structures fails + */ +export class RustdocParseError extends MCPDocsRsError { + readonly itemPath?: string + readonly expectedType?: string + + constructor(message: string, itemPath?: string, expectedType?: string) { + super(message, { + itemPath, + expectedType + }) + + this.itemPath = itemPath + this.expectedType = expectedType + } +} + +/** + * Error thrown when an item is not found in rustdoc + */ +export class ItemNotFoundError extends MCPDocsRsError { + readonly crateName: string + readonly itemPath: string + + constructor(crateName: string, itemPath: string) { + const message = `Item '${itemPath}' not found in crate '${crateName}'` + + super(message, { + crateName, + itemPath + }) + + this.crateName = crateName + this.itemPath = itemPath + } +} + +/** + * Error thrown when an operation is aborted + */ +export class AbortError extends MCPDocsRsError { + constructor(operation: string, reason?: string) { + const message = `Operation aborted: ${operation}${reason ?
` - ${reason}` : ""}` + + super(message, { + operation, + reason + }) + + this.name = "AbortError" // Ensure the name matches what fetch throws + } +} + +/** + * Error logger utility functions + */ +const formatError = (error: Error): string => { + if (error instanceof MCPDocsRsError) { + const lines = [`[${error.timestamp.toISOString()}] ${error.name}: ${error.message}`] + + if (error.context && Object.keys(error.context).length > 0) { + lines.push("Context:") + for (const [key, value] of Object.entries(error.context)) { + lines.push(` ${key}: ${JSON.stringify(value)}`) + } + } + + if (error.stack) { + lines.push("Stack trace:") + lines.push(error.stack) + } + + return lines.join("\n") + } + + return `[${new Date().toISOString()}] ${error.name || "Error"}: ${error.message}\n${error.stack || ""}` +} + +export const ErrorLogger = { + log(error: Error): void { + // During tests, check if this is an expected error + if (process.env.NODE_ENV === "test" || process.env.BUN_ENV === "test") { + // In test environment, use a different format for expected errors + if ( + error instanceof CrateNotFoundError || + error instanceof TimeoutError || + error instanceof RustdocParseError || + error instanceof AbortError || + error instanceof NetworkError || + error.name === "AbortError" || // For native AbortError + error.message === "Test interception" // For URL validation tests + ) { + // Show a brief indicator that the expected error was caught + if (process.env.LOG_EXPECTED_ERRORS === "true") { + // Full logging if explicitly requested + console.error(`\x1b[32m[EXPECTED ERROR] ${error.name}: ${error.message}\x1b[0m`) + } else { + // Brief indicator in green to show test is working correctly + console.error(`\x1b[32m✓ Expected ${error.name} thrown\x1b[0m`) + } + return + } + } + console.error(formatError(error)) + }, + + logWarning(message: string, context?: Record<string, unknown>): void { + const timestamp = new Date().toISOString() + const contextStr = context ? ` - Context: ${JSON.stringify(context)}` : "" + console.error(`[${timestamp}] WARNING: ${message}${contextStr}`) + }, + + logInfo(message: string, context?: Record<string, unknown>): void { + // Skip info logging during tests or when silent mode is enabled + if (process.env.SILENT_LOGS === "true" || process.env.MCP_TEST === "true") { + return + } + const timestamp = new Date().toISOString() + const contextStr = context ? ` - Context: ${JSON.stringify(context)}` : "" + console.error(`[${timestamp}] INFO: ${message}${contextStr}`) + } +} + +/** + * Type guard to check if an error is an MCPDocsRsError + */ +export function isMCPDocsRsError(error: unknown): error is MCPDocsRsError { + return error instanceof MCPDocsRsError +} + +/** + * Type guard for specific error types + */ +export function isNetworkError(error: unknown): error is NetworkError { + return error instanceof NetworkError +} + +export function isJSONParseError(error: unknown): error is JSONParseError { + return error instanceof JSONParseError +} + +export function isCrateNotFoundError(error: unknown): error is CrateNotFoundError { + return error instanceof CrateNotFoundError +} + +export function isTimeoutError(error: unknown): error is TimeoutError { + return error instanceof TimeoutError +} + +export function isAbortError(error: unknown): error is AbortError { + return error instanceof AbortError || (error instanceof Error && error.name === "AbortError") +}
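The hierarchy is built so callers can branch on error type and emit structured logs. A small sketch using only names defined in this file (the misspelled crate name is a deliberate example):

```ts
import { CrateNotFoundError, isCrateNotFoundError, isTimeoutError } from "./errors.js"

try {
  throw new CrateNotFoundError("serde_jsonn", "1.0.0") // e.g. a typo for serde_json
} catch (error) {
  if (isCrateNotFoundError(error)) {
    // toJSON() exposes name, message, timestamp, context and stack for structured logging
    console.error(JSON.stringify(error.toJSON()))
  } else if (isTimeoutError(error)) {
    console.error(`timed out after ${error.timeoutMs}ms`)
  } else {
    throw error
  }
}
```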
diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000..024e0be --- /dev/null +++ b/src/index.ts @@ -0,0 +1,169 @@ +#!/usr/bin/env bun + +import { parseArgs } from "node:util" +import { createRustDocsServer } from "./server.js" +import type { ServerConfig } from "./types.js" + +// Parse command line arguments +const { values, positionals } = parseArgs({ + args: process.argv.slice(2), + options: { + help: { type: "boolean", short: "h" }, + version: { type: "boolean", short: "v" }, + port: { type: "string" }, + stdio: { type: "boolean" }, + "cache-ttl": { type: "string" }, + "max-cache-size": { type: "string" }, + "request-timeout": { type: "string" }, + "db-path": { type: "string" } + }, + allowPositionals: true +}) + +// Show help if requested +if (values.help || positionals.includes("help")) { + console.log(` +MCP Rust Docs Server + +A Model Context Protocol server for fetching Rust crate documentation from docs.rs + +Usage: + mcp-docsrs [options] + +Options: + -h, --help Show this help message + -v, --version Show version information + --port HTTP server port (default: 3331) + --stdio Use stdio transport instead of HTTP + --cache-ttl Cache TTL in milliseconds (default: 3600000) + --max-cache-size Maximum cache entries (default: 100) + --request-timeout Request timeout in milliseconds (default: 30000) + --db-path Path to cache directory (cache.db will be created inside) or ":memory:" (default: :memory:) + +Environment Variables: + PORT HTTP server port + CACHE_TTL Cache TTL in milliseconds + MAX_CACHE_SIZE Maximum cache entries + REQUEST_TIMEOUT Request timeout in milliseconds + DB_PATH Path to cache directory (cache.db will be created inside) + +Examples: + # Run HTTP server (default) + mcp-docsrs + + # Run on custom port + mcp-docsrs --port 8080 + + # Run with stdio transport + mcp-docsrs --stdio + + # Run with custom cache settings + mcp-docsrs --cache-ttl 7200000 --max-cache-size 200 + + # Run with persistent database + mcp-docsrs --db-path /path/to/cache/directory + +MCP Integration: + For stdio mode with Claude Desktop, add to your claude_desktop_config.json: + { + "mcpServers": { + "rust-docs": { + "command": "mcp-docsrs", + "args": ["--stdio"] + } + } + } +`) + process.exit(0) +} + +// Show version if requested +if (values.version) { + const packageJson = require("../package.json") + console.log(`mcp-docsrs v${packageJson.version}`) + process.exit(0) +} + +// Check transport mode +const useStdio = values.stdio || false +const port = Number.parseInt((values.port as
string) || process.env.PORT || "3331", 10) + +// Configuration from command line and environment variables +const cacheTtl = Number.parseInt( + (values["cache-ttl"] as string) || process.env.CACHE_TTL || "3600000", + 10 +) +const maxCacheSize = Number.parseInt( + (values["max-cache-size"] as string) || process.env.MAX_CACHE_SIZE || "100", + 10 +) +const requestTimeout = Number.parseInt( + (values["request-timeout"] as string) || process.env.REQUEST_TIMEOUT || "30000", + 10 +) +const dbPath = (values["db-path"] as string) || process.env.DB_PATH + +// Validate configuration +if (!useStdio && (Number.isNaN(port) || port <= 0 || port > 65535)) { + console.error("Error: Invalid port value") + process.exit(1) +} + +if (Number.isNaN(cacheTtl) || cacheTtl <= 0) { + console.error("Error: Invalid cache TTL value") + process.exit(1) +} + +if (Number.isNaN(maxCacheSize) || maxCacheSize <= 0) { + console.error("Error: Invalid max cache size value") + process.exit(1) +} + +if (Number.isNaN(requestTimeout) || requestTimeout <= 0) { + console.error("Error: Invalid request timeout value") + process.exit(1) +} + +// Create config object after validation +const config: ServerConfig = { + cacheTtl, + maxCacheSize, + requestTimeout, + dbPath, + port: useStdio ? undefined : port, + useStdio +} + +// Error handling +process.on("uncaughtException", (error) => { + console.error("Uncaught exception:", error) + process.exit(1) +}) + +process.on("unhandledRejection", (reason, promise) => { + console.error("Unhandled rejection at:", promise, "reason:", reason) + process.exit(1) +}) + +// Create and start server +const { start, cleanup } = createRustDocsServer(config) + +// Handle graceful shutdown +process.on("SIGINT", () => { + console.error("\nShutting down gracefully...") + cleanup() + process.exit(0) +}) + +process.on("SIGTERM", () => { + console.error("\nShutting down gracefully...") + cleanup() + process.exit(0) +}) + +// Start the server +start().catch((error) => { + console.error("Failed to start MCP server:", error) + cleanup() + process.exit(1) +}) diff --git a/src/rustdoc-parser.ts b/src/rustdoc-parser.ts new file mode 100644 index 0000000..23c2a6a --- /dev/null +++ b/src/rustdoc-parser.ts @@ -0,0 +1,316 @@ +import { ErrorLogger, RustdocParseError } from "./errors.js" +import type { RustdocItem, RustdocJson } from "./types.js" + +// Helper functions +const getFirstLine = (docs: string): string => { + const firstLine = docs.split("\n")[0].trim() + return firstLine.length > 100 ? `${firstLine.substring(0, 97)}...` : firstLine +} + +const getItemKind = (item: RustdocItem): string => { + if (!item.inner) return "Unknown" + + if (item.inner.struct) return "Struct" + if (item.inner.enum) return "Enum" + if (item.inner.function) return "Function" + if (item.inner.trait) return "Trait" + if (item.inner.module) return "Module" + if (item.inner.typedef) return "Type Alias" + if (item.inner.impl) return "Implementation" + + return "Unknown" +} + +// Extract modules from a parent item +const extractModules = (json: RustdocJson, parentId: string): string[] => { + const modules: string[] = [] + const parentItem = json.index[parentId] + + if (parentItem?.inner?.module?.items) { + for (const itemId of parentItem.inner.module.items) { + const item = json.index[itemId] + if (item?.inner?.module && item.visibility === "public") { + const desc = item.docs ? 
`: ${getFirstLine(item.docs)}` : "" + modules.push(`- **${item.name}**${desc}`) + } + } + } + + return modules +} + +// Extract types (structs, enums, traits) from a parent item +const extractTypes = ( + json: RustdocJson, + parentId: string +): { + structs: string[] + enums: string[] + traits: string[] +} => { + const result = { + structs: [] as string[], + enums: [] as string[], + traits: [] as string[] + } + + const parentItem = json.index[parentId] + const items = parentItem?.inner?.module?.items || [] + + for (const itemId of items) { + const item = json.index[itemId] + if (!item || item.visibility !== "public") continue + + const desc = item.docs ? `: ${getFirstLine(item.docs)}` : "" + const entry = `- **${item.name}**${desc}` + + if (item.inner?.struct) { + result.structs.push(entry) + } else if (item.inner?.enum) { + result.enums.push(entry) + } else if (item.inner?.trait) { + result.traits.push(entry) + } + } + + return result +} + +// Extract functions from a parent item +const extractFunctions = (json: RustdocJson, parentId: string): string[] => { + const functions: string[] = [] + const parentItem = json.index[parentId] + const items = parentItem?.inner?.module?.items || [] + + for (const itemId of items) { + const item = json.index[itemId] + if (item?.inner?.function && item.visibility === "public") { + const desc = item.docs ? `: ${getFirstLine(item.docs)}` : "" + functions.push(`- **${item.name}**${desc}`) + } + } + + return functions +} + +// Format struct details +const formatStruct = (struct: any): string[] => { + const sections: string[] = [] + sections.push(`\n**Struct Type:** ${struct.struct_type}`) + + if (struct.fields && struct.fields.length > 0) { + sections.push("\n**Fields:** (field IDs available, would need to resolve)") + } + + if (struct.impls && struct.impls.length > 0) { + sections.push(`\n**Implementations:** ${struct.impls.length} impl block(s)`) + } + + return sections +} + +// Format enum details +const formatEnum = (enumData: any): string[] => { + const sections: string[] = [] + + if (enumData.variants && enumData.variants.length > 0) { + sections.push(`\n**Variants:** ${enumData.variants.length} variant(s)`) + } + + if (enumData.impls && enumData.impls.length > 0) { + sections.push(`\n**Implementations:** ${enumData.impls.length} impl block(s)`) + } + + return sections +} + +// Format function details +const formatFunction = (func: any): string[] => { + const sections: string[] = [] + + if (func.header) { + const attrs: string[] = [] + if (func.header.const) attrs.push("const") + if (func.header.async) attrs.push("async") + if (func.header.unsafe) attrs.push("unsafe") + + if (attrs.length > 0) { + sections.push(`\n**Attributes:** ${attrs.join(", ")}`) + } + } + + return sections +} + +// Format trait details +const formatTrait = (trait: any): string[] => { + const sections: string[] = [] + const attrs: string[] = [] + if (trait.is_auto) attrs.push("auto") + if (trait.is_unsafe) attrs.push("unsafe") + + if (attrs.length > 0) { + sections.push(`\n**Attributes:** ${attrs.join(", ")}`) + } + + if (trait.items && trait.items.length > 0) { + sections.push(`\n**Items:** ${trait.items.length} associated item(s)`) + } + + return sections +} + +// Format a single item +const formatItem = (item: RustdocItem, kind?: string): string => { + const sections: string[] = [] + + // Name and type + if (item.name) { + sections.push(`# ${item.name}`) + } + + // Kind/Type + const itemKind = kind || getItemKind(item) + if (itemKind) { + sections.push(`\n**Type:** 
${itemKind}`) + } + + // Visibility + if (item.visibility && item.visibility !== "public") { + sections.push(`**Visibility:** ${item.visibility}`) + } + + // Documentation + if (item.docs) { + sections.push(`\n## Documentation\n${item.docs}`) + } + + // Deprecation notice + if (item.deprecation) { + sections.push("\n⚠️ **Deprecated**") + } + + // Additional details based on inner type + if (item.inner) { + if (item.inner.struct) { + sections.push(...formatStruct(item.inner.struct)) + } else if (item.inner.enum) { + sections.push(...formatEnum(item.inner.enum)) + } else if (item.inner.function) { + sections.push(...formatFunction(item.inner.function)) + } else if (item.inner.trait) { + sections.push(...formatTrait(item.inner.trait)) + } + } + + return sections.join("\n") +} + +// Parse the main crate information +export const parseCrateInfo = (json: RustdocJson): string => { + try { + if (!json || typeof json !== "object") { + throw new RustdocParseError("Invalid rustdoc JSON structure: not an object") + } + + if (!json.root || !json.index) { + throw new RustdocParseError("Invalid rustdoc JSON structure: missing root or index") + } + + const rootItem = json.index[json.root] + if (!rootItem) { + throw new RustdocParseError(`Root item '${json.root}' not found in index`) + } + + const sections: string[] = [] + + // Crate name and version + if (rootItem.name) { + let header = `# Crate: ${rootItem.name}` + if (json.crate_version) { + header += ` v${json.crate_version}` + } + sections.push(header) + } + + // Documentation + if (rootItem.docs) { + sections.push(`\n## Documentation\n${rootItem.docs}`) + } + + // Main modules + const modules = extractModules(json, json.root) + if (modules.length > 0) { + sections.push(`\n## Modules\n${modules.join("\n")}`) + } + + // Main types + const types = extractTypes(json, json.root) + if (types.structs.length > 0) { + sections.push(`\n## Structs\n${types.structs.join("\n")}`) + } + if (types.enums.length > 0) { + sections.push(`\n## Enums\n${types.enums.join("\n")}`) + } + if (types.traits.length > 0) { + sections.push(`\n## Traits\n${types.traits.join("\n")}`) + } + + // Main functions + const functions = extractFunctions(json, json.root) + if (functions.length > 0) { + sections.push(`\n## Functions\n${functions.join("\n")}`) + } + + return sections.join("\n") + } catch (error) { + ErrorLogger.log(error as Error) + if (error instanceof RustdocParseError) { + throw error + } + throw new RustdocParseError(`Failed to parse crate info: ${(error as Error).message}`) + } +} + +// Find and parse a specific item by path +export const findItem = (json: RustdocJson, itemPath: string): string | null => { + try { + if (!json || typeof json !== "object") { + throw new RustdocParseError("Invalid rustdoc JSON structure: not an object") + } + + if (!json.paths || !json.index) { + throw new RustdocParseError("Invalid rustdoc JSON structure: missing paths or index") + } + + // First try to find by path in the paths index + for (const [id, pathInfo] of Object.entries(json.paths)) { + const fullPath = pathInfo.path.join("::") + if (fullPath.endsWith(itemPath) || pathInfo.path[pathInfo.path.length - 1] === itemPath) { + const item = json.index[id] + if (item) { + return formatItem(item, pathInfo.kind) + } + } + } + + // Fallback: search through all items by name + const searchName = itemPath.split(".").pop() || itemPath + for (const [, item] of Object.entries(json.index)) { + if (item.name === searchName) { + return formatItem(item) + } + } + + return null + } catch (error) { + 
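/* Log first, then rethrow RustdocParseError unchanged; anything else is wrapped below with the item path for context. */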
ErrorLogger.log(error as Error) + if (error instanceof RustdocParseError) { + throw error + } + throw new RustdocParseError( + `Failed to find item '${itemPath}': ${(error as Error).message}`, + itemPath + ) + } +} diff --git a/src/server.ts b/src/server.ts new file mode 100644 index 0000000..491f8b4 --- /dev/null +++ b/src/server.ts @@ -0,0 +1,440 @@ +import { randomUUID } from "node:crypto" +import { McpServer, ResourceTemplate } from "@modelcontextprotocol/sdk/server/mcp.js" +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js" +import { WebStandardStreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/webStandardStreamableHttp.js" +import { createDocsFetcher } from "./docs-fetcher.js" +import { ErrorLogger } from "./errors.js" +import { + createLookupCrateHandler, + createLookupItemHandler, + createSearchCratesHandler, + lookupCrateInputSchema, + lookupCratePrompt, + lookupCrateTool, + lookupItemInputSchema, + lookupItemPrompt, + lookupItemTool, + searchCratesInputSchema, + searchCratesPrompt, + searchCratesTool, + suggestSimilarCrates +} from "./tools/index.js" +import type { ServerConfig } from "./types.js" + +// Create MCP server handlers +const createHandlers = (config: ServerConfig = {}) => { + const fetcher = createDocsFetcher(config) + + // Create tool handlers + const lookupCrateHandler = createLookupCrateHandler(fetcher) + const lookupItemHandler = createLookupItemHandler(fetcher) + const searchCratesHandler = createSearchCratesHandler() + + // Enhanced lookup crate handler with suggestions + const enhancedLookupCrateHandler = async (args: any) => { + const result = await lookupCrateHandler(args) + + // If crate not found, suggest similar crates + if (result.isError && result.content[0].text.includes("not found")) { + const suggestions = await suggestSimilarCrates(args.crateName) + // Only show suggestions if we found actual alternatives + if (suggestions.length > 0 && !suggestions.includes(args.crateName)) { + result.content[0].text += `\n\nDid you mean one of these crates?\n${suggestions.map((s) => `- ${s}`).join("\n")}` + } + } + + return result + } + + // Cache query functions + const getCacheStats = () => { + return fetcher.getCacheStats() + } + + const getCacheEntries = (limit: number, offset: number) => { + return fetcher.getCacheEntries(limit, offset) + } + + const queryCacheDb = (sql: string) => { + return fetcher.queryCacheDb(sql) + } + + // Get server configuration + const getServerConfig = () => { + return { + cacheTtl: config.cacheTtl || 3600000, + maxCacheSize: config.maxCacheSize || 100, + requestTimeout: config.requestTimeout || 30000, + dbPath: config.dbPath || ":memory:" + } + } + + // Cleanup function + const cleanup = () => { + fetcher.close() + } + + return { + handleLookupCrate: enhancedLookupCrateHandler, + handleLookupItem: lookupItemHandler, + handleSearchCrates: searchCratesHandler, + cleanup, + getCacheStats, + getCacheEntries, + queryCacheDb, + getServerConfig + } +} + +// Create and configure the MCP server +export const createRustDocsServer = (config: ServerConfig = {}) => { + const server = new McpServer({ + name: "mcp-docsrs", + version: "1.0.0" + }) + + const handlers = createHandlers(config) + + // Register tools + server.registerTool( + lookupCrateTool.name, + { + annotations: lookupCrateTool.annotations, + description: lookupCrateTool.description, + inputSchema: lookupCrateInputSchema as any + }, + handlers.handleLookupCrate as any + ) + + server.registerTool( + lookupItemTool.name, + { + 
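/* As with the first registration above, the "as any" casts bridge the raw zod schema shapes and handlers to the SDK's stricter typed overloads. */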
annotations: lookupItemTool.annotations, + description: lookupItemTool.description, + inputSchema: lookupItemInputSchema as any + }, + handlers.handleLookupItem as any + ) + + server.registerTool( + searchCratesTool.name, + { + annotations: searchCratesTool.annotations, + description: searchCratesTool.description, + inputSchema: searchCratesInputSchema as any + }, + handlers.handleSearchCrates as any + ) + + // Setup prompts with argument schemas + server.prompt( + lookupCratePrompt.name, + lookupCratePrompt.description, + lookupCratePrompt.argsSchema as any, + lookupCratePrompt.handler as any + ) + + server.prompt( + lookupItemPrompt.name, + lookupItemPrompt.description, + lookupItemPrompt.argsSchema as any, + lookupItemPrompt.handler as any + ) + + server.prompt( + searchCratesPrompt.name, + searchCratesPrompt.description, + searchCratesPrompt.argsSchema as any, + searchCratesPrompt.handler as any + ) + + // Register cache query resources + server.resource( + "cache-stats", + new ResourceTemplate("cache://stats", { + list: async () => ({ + resources: [ + { + name: "Cache Statistics", + uri: "cache://stats", + description: "Get cache statistics including total entries and size" + } + ] + }) + }), + (uri) => { + try { + const stats = handlers.getCacheStats() + return { + contents: [ + { + uri: uri.href, + mimeType: "application/json", + text: JSON.stringify(stats, null, 2) + } + ] + } + } catch (error) { + ErrorLogger.log(error as Error) + return { + contents: [ + { + uri: uri.href, + mimeType: "text/plain", + text: `Error retrieving cache statistics: ${(error as Error).message}` + } + ] + } + } + } + ) + + server.resource( + "cache-entries", + new ResourceTemplate("cache://entries?limit={limit}&offset={offset}", { + list: async () => ({ + resources: [ + { + name: "Cache Entries", + uri: "cache://entries?limit=10&offset=0", + description: "List cached documentation entries" + } + ] + }) + }), + (uri, args) => { + try { + const limit = args.limit ? Number.parseInt(args.limit as string, 10) : 100 + const offset = args.offset ? Number.parseInt(args.offset as string, 10) : 0 + const entries = handlers.getCacheEntries(limit, offset) + return { + contents: [ + { + uri: uri.href, + mimeType: "application/json", + text: JSON.stringify({ entries }, null, 2) + } + ] + } + } catch (error) { + ErrorLogger.log(error as Error) + return { + contents: [ + { + uri: uri.href, + mimeType: "text/plain", + text: `Error retrieving cache entries: ${(error as Error).message}` + } + ] + } + } + } + ) + + server.resource( + "cache-query", + new ResourceTemplate("cache://query?sql={sql}", { + list: async () => ({ + resources: [ + { + name: "Cache Query", + uri: "cache://query?sql=SELECT key FROM cache LIMIT 10", + description: + "Execute SELECT queries on the cache database. 
Example: SELECT key, LENGTH(data) as size FROM cache WHERE key LIKE '%serde%'" + } + ] + }) + }), + (uri, args) => { + try { + // Only allow SELECT queries for safety + const sql = decodeURIComponent(args.sql as string) + if (!sql || !sql.trim().toUpperCase().startsWith("SELECT")) { + throw new Error("Only SELECT queries are allowed for safety") + } + const results = handlers.queryCacheDb(sql) + return { + contents: [ + { + uri: uri.href, + mimeType: "application/json", + text: JSON.stringify(results, null, 2) + } + ] + } + } catch (error) { + ErrorLogger.log(error as Error) + return { + contents: [ + { + uri: uri.href, + mimeType: "text/plain", + text: `Error executing query: ${(error as Error).message}` + } + ] + } + } + } + ) + + server.resource( + "server-config", + new ResourceTemplate("cache://config", { + list: async () => ({ + resources: [ + { + name: "Server Configuration", + uri: "cache://config", + description: "Get current server configuration (cache TTL, max size, DB path, etc.)" + } + ] + }) + }), + (uri) => { + try { + const config = handlers.getServerConfig() + return { + contents: [ + { + uri: uri.href, + mimeType: "application/json", + text: JSON.stringify(config, null, 2) + } + ] + } + } catch (error) { + ErrorLogger.log(error as Error) + return { + contents: [ + { + uri: uri.href, + mimeType: "text/plain", + text: `Error retrieving server configuration: ${(error as Error).message}` + } + ] + } + } + } + ) + + // Start the server + const start = async (): Promise<void> => { + try { + if (config.useStdio) { + // Stdio transport + const transport = new StdioServerTransport() + await server.connect(transport) + ErrorLogger.logInfo("MCP Rust Docs Server is running (stdio)", { config }) + } else { + // HTTP transport + const port = config.port || 3331 + const transports: Record<string, WebStandardStreamableHTTPServerTransport> = {} + + // Use Bun's built-in HTTP server + Bun.serve({ + port, + async fetch(req) { + const url = new URL(req.url) + + if (url.pathname !== "/mcp") { + return new Response("Not Found", { status: 404 }) + } + + const sessionId = req.headers.get("mcp-session-id") || undefined + let transport: WebStandardStreamableHTTPServerTransport + + if (req.method === "POST") { + const body: unknown = await req.json() + + if (sessionId && transports[sessionId]) { + transport = transports[sessionId] + } else if ( + !sessionId && + typeof body === "object" && + body !== null && + "method" in body && + body.method === "initialize" + ) { + transport = new WebStandardStreamableHTTPServerTransport({ + sessionIdGenerator: () => randomUUID(), + onsessioninitialized: (id) => { + transports[id] = transport + ErrorLogger.logInfo("Session initialized", { sessionId: id }) + }, + onsessionclosed: (id) => { + delete transports[id] + ErrorLogger.logInfo("Session closed", { sessionId: id }) + } + }) + + transport.onclose = () => { + if (transport.sessionId) { + delete transports[transport.sessionId] + } + } + + await server.connect(transport) + } else { + return Response.json( + { + jsonrpc: "2.0", + error: { code: -32000, message: "Invalid session" }, + id: null + }, + { status: 400 } + ) + } + + return transport.handleRequest(req, { parsedBody: body }) + } + + if (req.method === "GET" || req.method === "DELETE") { + if (!sessionId || !transports[sessionId]) { + return new Response("Invalid session", { status: 400 }) + } + return transports[sessionId].handleRequest(req) + } + + return new Response("Method Not Allowed", { status: 405 }) + } + }) + + ErrorLogger.logInfo("MCP Rust Docs Server is running (HTTP)", { + config, + url: `http://localhost:${port}/mcp` + }) + } + } catch (error) { + ErrorLogger.log(error as Error) + throw error + } + } + + // Cleanup on exit + const cleanup = () => { + handlers.cleanup() + } + + // Setup error handlers + process.on("SIGINT", () => { + cleanup() + process.exit(0) + }) + + process.on("SIGTERM", () => { + cleanup() + process.exit(0) + }) + + process.on("uncaughtException", (error) => { + ErrorLogger.log(error as Error) + cleanup() + process.exit(1) + }) + + return { + start, + cleanup, + server + } +}
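Session handling in the HTTP branch hinges on the `mcp-session-id` header: the first `initialize` POST creates a transport and assigns an id, and every later GET/POST/DELETE must echo that id back. A hypothetical client-side sketch (the `protocolVersion` value and client fields are assumptions, not taken from this diff):

```ts
const res = await fetch("http://localhost:3331/mcp", {
  method: "POST",
  headers: {
    "content-type": "application/json",
    accept: "application/json, text/event-stream"
  },
  body: JSON.stringify({
    jsonrpc: "2.0",
    id: 1,
    method: "initialize",
    params: {
      protocolVersion: "2025-03-26", // assumed protocol revision
      capabilities: {},
      clientInfo: { name: "demo-client", version: "0.0.1" }
    }
  })
})

// Reuse the issued id on follow-up requests; DELETE with the same header closes the session.
const sessionId = res.headers.get("mcp-session-id")
```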
diff --git a/src/tools/index.ts b/src/tools/index.ts new file mode 100644 index 0000000..38552cc --- /dev/null +++ b/src/tools/index.ts @@ -0,0 +1,25 @@ +// Export all tools and their components +export { + createLookupCrateHandler, + lookupCrateInputSchema, + lookupCratePrompt, + lookupCratePromptSchema, + lookupCrateTool +} from "./lookup-crate.js" + +export { + createLookupItemHandler, + lookupItemInputSchema, + lookupItemPrompt, + lookupItemPromptSchema, + lookupItemTool +} from "./lookup-item.js" + +export { + createSearchCratesHandler, + searchCratesInputSchema, + searchCratesPrompt, + searchCratesPromptSchema, + searchCratesTool, + suggestSimilarCrates +} from "./search-crates.js" diff --git a/src/tools/lookup-crate.ts b/src/tools/lookup-crate.ts new file mode 100644 index 0000000..99c440d --- /dev/null +++ b/src/tools/lookup-crate.ts @@ -0,0 +1,149 @@ +import { z } from "zod" +import { ErrorLogger, isCrateNotFoundError, isJSONParseError, isMCPDocsRsError } from "../errors.js" +import { parseCrateInfo } from "../rustdoc-parser.js" +import type { DocsFetcher, DocsResponse, LookupCrateArgs } from "../types.js" + +// Input schema for lookup_crate_docs tool +export const lookupCrateInputSchema = { + crateName: z.string().describe("Name of the Rust crate to lookup documentation for"), + version: z + .string() + .optional() + .describe('Specific version (e.g., "1.0.0") or semver range (e.g., "~4")'), + target: z.string().optional().describe('Target platform (e.g., "i686-pc-windows-msvc")'), + formatVersion: z.number().optional().describe("Rustdoc JSON format version") +} + +// Tool metadata +export const lookupCrateTool = { + name: "lookup_crate_docs", + description: "Lookup documentation for a Rust crate from docs.rs", + annotations: { + title: "Lookup Rust Crate Documentation", + readOnlyHint: true, + destructiveHint: true, + idempotentHint: true, + openWorldHint: true + } +} + +// Handler for lookup_crate_docs +export const createLookupCrateHandler = (fetcher: DocsFetcher) => { + return async (args: LookupCrateArgs): Promise<DocsResponse> => { + try { + const { data: json, fromCache } = await fetcher.fetchCrateJson( + args.crateName, + args.version, + args.target, + args.formatVersion + ) + + // Log cache status internally for debugging + ErrorLogger.logInfo("Crate documentation retrieved", { + crateName: args.crateName, + fromCache + }) + + const content = parseCrateInfo(json) + + return { + content: [ + { + type: "text", + text: content + } + ] + } + } catch (error) { + // Log the error with full context + if (error instanceof Error) { + ErrorLogger.log(error) + } + + // Provide user-friendly error messages based on error type + let errorMessage: string + if (isJSONParseError(error)) { + errorMessage = + "Failed to parse JSON from docs.rs. The response may not be valid rustdoc JSON."
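/* A JSONParseError here usually means docs.rs answered with something other than rustdoc JSON (for example an HTML error page). */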
+ } else if (isCrateNotFoundError(error)) { + errorMessage = error.message + } else if (isMCPDocsRsError(error)) { + errorMessage = error.message + } else if (error instanceof Error) { + errorMessage = error.message + } else { + errorMessage = "Unknown error occurred" + } + + return { + content: [ + { + type: "text", + text: `Error: ${errorMessage}` + } + ], + isError: true + } + } + } +} + +// Prompt arguments schema for lookup_crate_docs +export const lookupCratePromptSchema = { + crateName: z.string().optional().describe("Name of the Rust crate to lookup documentation for"), + version: z + .string() + .optional() + .describe('Specific version (e.g., "1.0.0") or semver range (e.g., "~4")'), + target: z.string().optional().describe('Target platform (e.g., "i686-pc-windows-msvc")') +} + +// Prompt for lookup_crate_docs with dynamic argument handling +export const lookupCratePrompt = { + name: "lookup_crate_docs", + description: "Analyze and summarize documentation for a Rust crate", + argsSchema: lookupCratePromptSchema, + handler: (args: any) => { + // Check if required arguments are missing + if (!args?.crateName) { + return { + messages: [ + { + role: "user" as const, + content: { + type: "text" as const, + text: "Which Rust crate would you like me to look up documentation for? Please provide the crate name." + } + } + ] + } + } + + // Build the prompt text with the provided arguments + let promptText = `Please analyze and summarize the documentation for the Rust crate "${args.crateName}"` + + if (args.version) { + promptText += ` version ${args.version}` + } + + promptText += `. Focus on: +1. The main purpose and features of the crate +2. Key types and functions +3. Common usage patterns +4. Any important notes or warnings + +I'll fetch the documentation for you.` + + return { + messages: [ + { + role: "user" as const, + content: { + type: "text" as const, + text: promptText + } + } + ] + } + } +} diff --git a/src/tools/lookup-item.ts b/src/tools/lookup-item.ts new file mode 100644 index 0000000..e1a1223 --- /dev/null +++ b/src/tools/lookup-item.ts @@ -0,0 +1,187 @@ +import { z } from "zod" +import { + ErrorLogger, + ItemNotFoundError, + isCrateNotFoundError, + isJSONParseError, + isMCPDocsRsError +} from "../errors.js" +import { findItem } from "../rustdoc-parser.js" +import type { DocsFetcher, DocsResponse, LookupItemArgs } from "../types.js" + +// Input schema for lookup_item_docs tool +export const lookupItemInputSchema = { + crateName: z.string().describe("Name of the Rust crate"), + itemPath: z + .string() + .describe('Path to specific item (e.g., "struct.MyStruct" or "fn.my_function")'), + version: z.string().optional().describe("Specific version or semver range"), + target: z.string().optional().describe("Target platform") +} + +// Tool metadata +export const lookupItemTool = { + name: "lookup_item_docs", + description: "Lookup documentation for a specific item (struct, function, etc.) 
+  annotations: {
+    title: "Lookup Rust Item Documentation",
+    readOnlyHint: true,
+    destructiveHint: false,
+    idempotentHint: true,
+    openWorldHint: true
+  }
+}
+
+// Handler for lookup_item_docs
+export const createLookupItemHandler = (fetcher: DocsFetcher) => {
+  return async (args: LookupItemArgs): Promise<DocsResponse> => {
+    try {
+      const { data: json, fromCache } = await fetcher.fetchCrateJson(
+        args.crateName,
+        args.version,
+        args.target
+      )
+
+      // Log cache status internally for debugging
+      ErrorLogger.logInfo("Item documentation retrieved", {
+        crateName: args.crateName,
+        itemPath: args.itemPath,
+        fromCache
+      })
+
+      const itemContent = findItem(json, args.itemPath)
+
+      if (!itemContent) {
+        throw new ItemNotFoundError(args.crateName, args.itemPath)
+      }
+
+      return {
+        content: [
+          {
+            type: "text",
+            text: itemContent
+          }
+        ]
+      }
+    } catch (error) {
+      // Log the error with full context
+      if (error instanceof Error) {
+        ErrorLogger.log(error)
+      }
+
+      // Provide user-friendly error messages based on error type
+      let errorMessage: string
+      if (isJSONParseError(error)) {
+        errorMessage =
+          "Failed to parse JSON from docs.rs. The response may not be valid rustdoc JSON."
+      } else if (isCrateNotFoundError(error)) {
+        errorMessage = error.message
+      } else if (isMCPDocsRsError(error)) {
+        errorMessage = error.message
+      } else if (error instanceof Error) {
+        errorMessage = error.message
+      } else {
+        errorMessage = "Unknown error occurred"
+      }
+
+      return {
+        content: [
+          {
+            type: "text",
+            text: `Error: ${errorMessage}`
+          }
+        ],
+        isError: true
+      }
+    }
+  }
+}
+
+// Prompt arguments schema for lookup_item_docs
+export const lookupItemPromptSchema = {
+  crateName: z.string().optional().describe("Name of the Rust crate"),
+  itemPath: z
+    .string()
+    .optional()
+    .describe('Path to specific item (e.g., "struct.MyStruct" or "fn.my_function")'),
+  version: z.string().optional().describe("Specific version or semver range"),
+  target: z.string().optional().describe("Target platform")
+}
+
+// Prompt for lookup_item_docs with dynamic argument handling
+export const lookupItemPrompt = {
+  name: "lookup_item_docs",
+  description: "Provide detailed information about a specific item from a Rust crate",
+  argsSchema: lookupItemPromptSchema,
+  handler: (args: any) => {
+    // Check if required arguments are missing
+    if (!args?.crateName && !args?.itemPath) {
+      return {
+        messages: [
+          {
+            role: "user" as const,
+            content: {
+              type: "text" as const,
+              text: "I need to know which Rust crate and item you'd like documentation for. Please provide:\n1. The crate name (e.g., 'tokio', 'serde')\n2. The item path (e.g., 'struct.Runtime', 'fn.spawn')"
+            }
+          }
+        ]
+      }
+    }
+
+    if (!args?.crateName) {
+      return {
+        messages: [
+          {
+            role: "user" as const,
+            content: {
+              type: "text" as const,
+              text: `Which Rust crate contains the item "${args.itemPath}"? Please provide the crate name.`
+            }
+          }
+        ]
+      }
+    }
+
+    if (!args?.itemPath) {
+      return {
+        messages: [
+          {
+            role: "user" as const,
+            content: {
+              type: "text" as const,
+              text: `What specific item from the "${args.crateName}" crate would you like documentation for? Please provide the item path (e.g., 'struct.MyStruct', 'fn.my_function', 'trait.MyTrait').`
+            }
+          }
+        ]
+      }
+    }
+
+    // Build the prompt text with the provided arguments
+    let promptText = `Please provide detailed information about the "${args.itemPath}" from the Rust crate "${args.crateName}"`
+
+    if (args.version) {
+      promptText += ` version ${args.version}`
+    }
+
+    promptText += `. Include:
+1. Purpose and functionality
+2. Parameters/fields and their types
+3. Usage examples if available
+4. Related items
+
+I'll fetch the documentation for you.`
+
+    return {
+      messages: [
+        {
+          role: "user" as const,
+          content: {
+            type: "text" as const,
+            text: promptText
+          }
+        }
+      ]
+    }
+  }
+}
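The two lookup handlers above share the same error-message dispatch verbatim. A sketch of how that branch chain could live in one place; `formatDocsError` is hypothetical and not part of this changeset, while the guards are the ones the handlers import from `../errors.js`:

```ts
// Hypothetical helper, not in this diff: the duplicated catch-block logic
// from createLookupCrateHandler and createLookupItemHandler in one place.
import { isCrateNotFoundError, isJSONParseError, isMCPDocsRsError } from "../errors.js"

export const formatDocsError = (error: unknown): string => {
  if (isJSONParseError(error)) {
    // Mirrors the message both handlers use for malformed rustdoc JSON
    return "Failed to parse JSON from docs.rs. The response may not be valid rustdoc JSON."
  }
  if (isCrateNotFoundError(error) || isMCPDocsRsError(error)) {
    return error.message
  }
  return error instanceof Error ? error.message : "Unknown error occurred"
}
```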
diff --git a/src/tools/search-crates.ts b/src/tools/search-crates.ts
new file mode 100644
index 0000000..105fae9
--- /dev/null
+++ b/src/tools/search-crates.ts
@@ -0,0 +1,216 @@
+import { z } from "zod"
+import { ErrorLogger, NetworkError } from "../errors.js"
+import type { DocsResponse } from "../types.js"
+
+// Crates.io API types
+interface CratesIoSearchResponse {
+  crates: Array<{
+    name: string
+    description: string | null
+    downloads: number
+    recent_downloads: number
+    max_version: string
+    documentation: string | null
+    repository: string | null
+    homepage: string | null
+  }>
+  meta: {
+    total: number
+  }
+}
+
+// Input schema for search_crates tool
+export const searchCratesInputSchema = {
+  query: z.string().describe("Search query for crate names (supports partial matches)"),
+  limit: z.number().optional().default(10).describe("Maximum number of results to return")
+}
+
+// Tool metadata
+export const searchCratesTool = {
+  name: "search_crates",
+  description: "Search for Rust crates on crates.io with fuzzy/partial name matching",
+  annotations: {
+    title: "Search Rust Crates",
+    readOnlyHint: true,
+    destructiveHint: false,
+    idempotentHint: true,
+    openWorldHint: true
+  }
+}
+
+// Handler for search_crates
+export const createSearchCratesHandler = () => {
+  return async (
+    args: z.infer<z.ZodObject<typeof searchCratesInputSchema>>
+  ): Promise<DocsResponse> => {
+    try {
+      const searchUrl = `https://crates.io/api/v1/crates?q=${encodeURIComponent(args.query)}&per_page=${args.limit}`
+
+      ErrorLogger.logInfo("Searching crates.io", {
+        query: args.query,
+        limit: args.limit
+      })
+
+      // Add timeout to prevent hanging requests
+      const controller = new AbortController()
+      const timeoutId = setTimeout(() => controller.abort(), 5000) // 5 second timeout
+
+      const response = await fetch(searchUrl, {
+        headers: {
+          "User-Agent": "mcp-docsrs/1.0.0"
+        },
+        signal: controller.signal
+      })
+
+      clearTimeout(timeoutId)
+
+      if (!response.ok) {
+        throw new NetworkError(searchUrl, response.status, response.statusText)
+      }
+
+      const data = (await response.json()) as CratesIoSearchResponse
+
+      if (data.crates.length === 0) {
+        return {
+          content: [
+            {
+              type: "text",
+              text: `No crates found matching "${args.query}"`
+            }
+          ]
+        }
+      }
+
+      // Format results
+      const results = data.crates
+        .map((crate, index) => {
+          const parts = [
+            `${index + 1}. **${crate.name}** v${crate.max_version}`,
+            crate.description ? `   ${crate.description}` : "",
+            `   Downloads: ${crate.downloads.toLocaleString()} (${crate.recent_downloads.toLocaleString()} recent)`,
+            crate.documentation ? `   Docs: ${crate.documentation}` : "",
+            ""
+          ]
+          return parts.filter((part) => part).join("\n")
+        })
+        .join("\n")
+
+      const header = `Found ${data.meta.total} crates matching "${args.query}" (showing top ${data.crates.length}):\n\n`
+
+      return {
+        content: [
+          {
+            type: "text",
+            text: header + results
+          }
+        ]
+      }
+    } catch (error) {
+      // Log the error with full context
+      if (error instanceof Error) {
+        ErrorLogger.log(error)
+      }
+
+      let errorMessage: string
+      if (error instanceof Error && error.name === "AbortError") {
+        errorMessage = "Request timed out while searching crates.io"
+      } else if (error instanceof NetworkError) {
+        errorMessage = error.message
+      } else if (error instanceof Error) {
+        errorMessage = error.message
+      } else {
+        errorMessage = "Unknown error occurred"
+      }
+
+      return {
+        content: [
+          {
+            type: "text",
+            text: `Error searching crates: ${errorMessage}`
+          }
+        ],
+        isError: true
+      }
+    }
+  }
+}
+
+// Helper function to suggest similar crate names
+export const suggestSimilarCrates = async (crateName: string, limit = 5): Promise<string[]> => {
+  try {
+    const searchUrl = `https://crates.io/api/v1/crates?q=${encodeURIComponent(crateName)}&per_page=${limit}`
+
+    // Add timeout to prevent hanging requests
+    const controller = new AbortController()
+    const timeoutId = setTimeout(() => controller.abort(), 5000) // 5 second timeout
+
+    const response = await fetch(searchUrl, {
+      headers: {
+        "User-Agent": "mcp-docsrs/1.0.0"
+      },
+      signal: controller.signal
+    })
+
+    clearTimeout(timeoutId)
+
+    if (!response.ok) {
+      return []
+    }
+
+    const data = (await response.json()) as CratesIoSearchResponse
+    return data.crates.map((crate) => crate.name)
+  } catch (error) {
+    ErrorLogger.log(error as Error)
+    return []
+  }
+}
+
+// Prompt arguments schema for search_crates
+export const searchCratesPromptSchema = {
+  query: z.string().optional().describe("Search query for crate names (supports partial matches)"),
+  limit: z.number().optional().describe("Maximum number of results to return")
+}
+
+// Prompt for search_crates with dynamic argument handling
+export const searchCratesPrompt = {
+  name: "search_crates",
+  description: "Search for Rust crates on crates.io",
+  argsSchema: searchCratesPromptSchema,
+  handler: (args: any) => {
+    // Check if required arguments are missing
+    if (!args?.query) {
+      return {
+        messages: [
+          {
+            role: "user" as const,
+            content: {
+              type: "text" as const,
+              text: "What would you like to search for on crates.io? Please provide a search query (e.g., 'serde', 'async', 'web framework')."
+            }
+          }
+        ]
+      }
+    }
+
+    // Build the prompt text with the provided arguments
+    let promptText = `Search for Rust crates matching "${args.query}" on crates.io`
+
+    if (args.limit) {
+      promptText += ` (limiting to ${args.limit} results)`
+    }
+
+    promptText += `. I'll search for matching crates and show you the results.`
+
+    return {
+      messages: [
+        {
+          role: "user" as const,
+          content: {
+            type: "text" as const,
+            text: promptText
+          }
+        }
+      ]
+    }
+  }
+}
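`suggestSimilarCrates` is exported above but nothing in this changeset calls it yet. One plausible integration, sketched here as an assumption rather than actual project code, is enriching a crate-not-found message with near-miss names:

```ts
// Sketch only - assumed wiring, not present in this changeset.
import { isCrateNotFoundError } from "../errors.js"
import { suggestSimilarCrates } from "./search-crates.js"

export const crateNotFoundMessage = async (crateName: string, error: unknown): Promise<string> => {
  const base = error instanceof Error ? error.message : `Crate '${crateName}' not found`
  if (!isCrateNotFoundError(error)) return base
  // Ask crates.io for close matches and append them as a "did you mean" hint
  const similar = await suggestSimilarCrates(crateName, 5)
  return similar.length > 0 ? `${base} Did you mean: ${similar.join(", ")}?` : base
}
```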
diff --git a/src/types.ts b/src/types.ts
new file mode 100644
index 0000000..2a40e4c
--- /dev/null
+++ b/src/types.ts
@@ -0,0 +1,167 @@
+import { z } from 'zod';
+import type { createDocsFetcher } from './docs-fetcher';
+
+// Zod schemas for tool parameters - raw shapes
+export const lookupCrateShape = {
+  crateName: z.string().describe('Name of the Rust crate to lookup documentation for'),
+  version: z.string().optional().describe('Specific version (e.g., "1.0.0") or semver range (e.g., "~4")'),
+  target: z.string().optional().describe('Target platform (e.g., "i686-pc-windows-msvc")'),
+  formatVersion: z.number().optional().describe('Rustdoc JSON format version'),
+};
+
+export const lookupItemShape = {
+  crateName: z.string().describe('Name of the Rust crate'),
+  itemPath: z.string().describe('Path to specific item (e.g., "struct.MyStruct" or "fn.my_function")'),
+  version: z.string().optional().describe('Specific version or semver range'),
+  target: z.string().optional().describe('Target platform'),
+};
+
+// Zod schemas as objects
+export const lookupCrateSchema = z.object(lookupCrateShape);
+export const lookupItemSchema = z.object(lookupItemShape);
+
+// Type inference
+export type LookupCrateArgs = z.infer<typeof lookupCrateSchema>;
+export type LookupItemArgs = z.infer<typeof lookupItemSchema>;
+
+// Response types
+export type DocsResponse = {
+  content: Array<{
+    type: 'text';
+    text: string;
+  }>;
+  isError?: boolean;
+  [key: string]: unknown;
+};
+
+// Cache types
+export type CacheData = {
+  data: any;
+  timestamp: number;
+  ttl: number;
+};
+
+// Generic cache entry type
+export type CacheEntryGeneric<T> = {
+  data: T;
+  timestamp: number;
+  ttl: number;
+};
+
+// Config types
+export type ServerConfig = {
+  cacheTtl?: number;
+  maxCacheSize?: number;
+  requestTimeout?: number;
+  dbPath?: string;
+  port?: number;
+  useStdio?: boolean;
+};
+
+// Rustdoc JSON format types
+export type RustdocItem = {
+  id: string;
+  crate_id: number;
+  name?: string;
+  span?: any;
+  visibility: 'public' | 'default' | 'crate' | 'restricted';
+  docs?: string;
+  attrs?: string[];
+  deprecation?: any;
+  inner?: RustdocItemInner;
+  links?: Record<string, string>;
+};
+
+export type RustdocItemInner = {
+  // Module
+  module?: {
+    is_crate: boolean;
+    items: string[];
+  };
+  // Struct
+  struct?: {
+    struct_type: 'plain' | 'tuple' | 'unit';
+    generics?: any;
+    fields_stripped?: boolean;
+    fields?: string[];
+    impls?: string[];
+  };
+  // Enum
+  enum?: {
+    generics?: any;
+    variants_stripped?: boolean;
+    variants?: string[];
+    impls?: string[];
+  };
+  // Function
+  function?: {
+    decl: any;
+    generics?: any;
+    header?: any;
+  };
+  // Trait
+  trait?: {
+    is_auto: boolean;
+    is_unsafe: boolean;
+    items: string[];
+    generics?: any;
+    bounds?: any[];
+    implementations?: string[];
+  };
+  // Type alias
+  typedef?: {
+    type: any;
+    generics?: any;
+  };
+  // Impl
+  impl?: {
+    is_unsafe: boolean;
+    generics?: any;
+    provided_trait_methods?: string[];
+    trait?: any;
+    for?: any;
+    items: string[];
+  };
+};
+
+export type RustdocJson = {
+  root: string;
+  crate_version?: string;
+  includes_private: boolean;
+  format_version: number;
+  index: Record<string, RustdocItem>;
+  paths: Record<
+    string,
+    {
+      crate_id: number;
+      path: string[];
+      kind: string;
+    }
+  >;
+  external_crates: Record<
+    string,
+    {
+      name: string;
+      html_root_url?: string;
+    }
+  >;
+};
+
+// Cache query types
+export type CacheQueryType = 'stats' | 'list' | 'query';
+
+export type CacheStats = {
+  totalEntries: number;
+  totalSize: number;
+  oldestEntry: Date | null;
+};
+
+export type CacheEntry = {
+  key: string;
+  timestamp: Date;
+  ttl: number;
+  expiresAt: Date;
+  size: number;
+};
+
+export type DocsFetcher = ReturnType<typeof createDocsFetcher>;
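A quick orientation on the split above: the raw shapes exist because MCP tool registration consumes a plain Zod shape, while the `z.object()` wrappers add runtime validation and compile-time inference. A minimal sketch of the latter two uses (the input literal is illustrative):

```ts
import { lookupCrateSchema, type LookupCrateArgs } from "./types.js"

// Validate untrusted input and get a typed value in one step
const args: LookupCrateArgs = lookupCrateSchema.parse({
  crateName: "serde",
  version: "1.0.0"
})
console.log(args.crateName) // "serde"; parse() throws on malformed input
```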
diff --git a/test/integration/README.md b/test/integration/README.md
new file mode 100644
index 0000000..cb31d5a
--- /dev/null
+++ b/test/integration/README.md
@@ -0,0 +1,43 @@
+# Integration Tests
+
+This directory contains integration tests for the mcp-docsrs binary executables across different platforms.
+
+## Structure
+
+- `test-binary.ts` - Main integration test suite orchestrator
+- `test-crates-search.ts` - Tests for crate search functionality
+- `test-mcp-protocol.ts` - Tests for MCP protocol implementation
+- `test-persistent-cache.ts` - Tests for persistent cache functionality
+- `test-resources.ts` - Tests for MCP resources and cache management
+- `test-zstd.ts` - Tests for zstd decompression functionality
+- `README.md` - This file
+
+## Running Tests
+
+### Native Binaries
+
+```bash
+bun test/integration/test-binary.ts ./dist/mcp-docsrs-linux-x64 linux-x64
+```
+
+## Test Coverage
+
+The integration tests verify:
+
+### Basic Tests (All Platforms)
+
+1. **Version Flag** - Binary responds correctly to `--version`
+2. **Server Startup** - MCP server starts and shuts down cleanly
+3. **Cache Functionality** - Cache directory is properly handled
+4. **MCP Operations** - Basic MCP protocol operations work correctly
+5. **Crate Search** - Search functionality with various query types
+6. **MCP Protocol** - Full protocol implementation including error handling
+7. **Persistent Cache** - Cache persistence across server restarts
+8. **Resources** - MCP resources, cache statistics, SQL queries, and security
+9. **Zstd Decompression** - Decompression of compressed documentation from docs.rs
+
+## Platform-Specific Tests
+
+- **Linux (GLIBC)** - Direct execution of full test suite
+- **macOS** - Direct execution of full test suite on Intel and Apple Silicon
+- **Windows** - Direct execution of full test suite with Windows-specific handling
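For the Windows row of that matrix, the orchestrator takes an extra `--windows` flag (see `test-binary.ts` later in this diff). A usage sketch; the artifact name is illustrative, substitute whatever the build actually produced:

```bash
bun test/integration/test-binary.ts ./dist/mcp-docsrs-windows-x64.exe windows-x64 --windows
```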
diff --git a/test/integration/api/cache-status.integration.test.ts b/test/integration/api/cache-status.integration.test.ts
new file mode 100644
index 0000000..315b478
--- /dev/null
+++ b/test/integration/api/cache-status.integration.test.ts
@@ -0,0 +1,173 @@
+import { afterEach, beforeEach, describe, expect, it } from "bun:test"
+import { existsSync, rmSync } from "node:fs"
+import { tmpdir } from "node:os"
+import { join } from "node:path"
+import { createDocsFetcher } from "../../../src/docs-fetcher.js"
+
+describe("Cache Status Tracking", () => {
+  let testDbPath: string
+  let fetcher: ReturnType<typeof createDocsFetcher>
+
+  beforeEach(() => {
+    // Create unique database path for each test to avoid conflicts
+    testDbPath = join(
+      tmpdir(),
+      `test-cache-status-${Date.now()}-${Math.random().toString(36).substring(2, 9)}.db`
+    )
+
+    // Create fetcher with persistent database for testing
+    fetcher = createDocsFetcher({
+      dbPath: testDbPath,
+      cacheTtl: 3600000, // 1 hour
+      maxCacheSize: 10
+    })
+  })
+
+  afterEach(async () => {
+    // Clean up
+    fetcher.close()
+
+    // Add a small delay on Windows to ensure file handles are released
+    if (process.platform === "win32") {
+      await new Promise((resolve) => setTimeout(resolve, 100))
+    }
+
+    if (existsSync(testDbPath)) {
+      try {
+        rmSync(testDbPath, { force: true })
+      } catch (error) {
+        // If still locked, try again after a longer delay
+        if ((error as any).code === "EBUSY" && process.platform === "win32") {
+          await new Promise((resolve) => setTimeout(resolve, 500))
+          rmSync(testDbPath, { force: true })
+        } else {
+          throw error
+        }
+      }
+    }
+  })
+
+  it("should return fromCache: false on first fetch", async () => {
+    const result = await fetcher.fetchCrateJson("tinc", "0.1.6")
+
+    expect(result.fromCache).toBe(false)
+    expect(result.data).toBeDefined()
+    expect(result.data.root).toBeDefined() // Basic validation of rustdoc JSON structure
+  }, 10000)
+
+  it("should return fromCache: true on subsequent fetch", async () => {
+    // First fetch - should hit the network
+    const firstResult = await fetcher.fetchCrateJson("tinc", "0.1.6")
+    expect(firstResult.fromCache).toBe(false)
+
+    // Second fetch - should hit the cache
+    const secondResult = await fetcher.fetchCrateJson("tinc", "0.1.6")
+    expect(secondResult.fromCache).toBe(true)
+
+    // Data should be identical
+    expect(secondResult.data).toEqual(firstResult.data)
+  }, 10000)
+
+  it("should persist cache across fetcher instances", async () => {
+    // First fetcher instance
+    const result1 = await fetcher.fetchCrateJson("tinc", "0.1.6")
+    expect(result1.fromCache).toBe(false)
+    fetcher.close()
+
+    // Create new fetcher instance with same database
+    const fetcher2 = createDocsFetcher({
+      dbPath: testDbPath,
+      cacheTtl: 3600000,
+      maxCacheSize: 10
+    })
+
+    // Should get cached result
+    const result2 = await fetcher2.fetchCrateJson("tinc", "0.1.6")
+    expect(result2.fromCache).toBe(true)
+    expect(result2.data).toEqual(result1.data)
+
+    fetcher2.close()
+  }, 10000)
+
+  it("should handle different versions separately", async () => {
+    // Fetch latest version
+    const latestResult = await fetcher.fetchCrateJson("tinc", "latest")
+    expect(latestResult.fromCache).toBe(false)
+
+    // Fetch a recent version that should have rustdoc JSON
+    const versionResult = await fetcher.fetchCrateJson("tinc", "0.1.6")
+    expect(versionResult.fromCache).toBe(false) // Different cache key
+
+    // Fetch latest again - should be cached
+    const latestAgain = await fetcher.fetchCrateJson("tinc", "latest")
+    expect(latestAgain.fromCache).toBe(true)
+
+    // Fetch the specific version again - should also be cached
+    const versionAgain = await fetcher.fetchCrateJson("tinc", "0.1.6")
+    expect(versionAgain.fromCache).toBe(true)
+  }, 15000)
+
+  it("should track cache misses for non-existent crates", async () => {
+    try {
+      await fetcher.fetchCrateJson("this-crate-definitely-does-not-exist-12345")
+      // Should not reach here
+      expect(true).toBe(false)
+    } catch (_error) {
+      // Error is expected, but let's verify cache behavior
+
+      // Try again - should still fail (not cached)
+      try {
+        await fetcher.fetchCrateJson("this-crate-definitely-does-not-exist-12345")
+        expect(true).toBe(false)
+      } catch (secondError) {
+        // Expected - errors are not cached
+        expect(secondError).toBeDefined()
+      }
+    }
+  }, 10000)
+
+  it("should work with in-memory cache", async () => {
+    // Create fetcher with in-memory cache
+    const memoryFetcher = createDocsFetcher({
+      cacheTtl: 3600000,
+      maxCacheSize: 10
+      // No dbPath - uses in-memory cache
+    })
+
+    const firstResult = await memoryFetcher.fetchCrateJson("tinc")
+    expect(firstResult.fromCache).toBe(false)
+
+    const secondResult = await memoryFetcher.fetchCrateJson("tinc")
+    expect(secondResult.fromCache).toBe(true)
+
+    memoryFetcher.close()
+  }, 10000)
+
+  it("should respect cache TTL", async () => {
+    // Create fetcher with very short TTL; keep the path in a variable so cleanup deletes the same file
+    const ttlDbPath = join(tmpdir(), `test-ttl-${Date.now()}.db`)
+    const shortTtlFetcher = createDocsFetcher({
+      dbPath: ttlDbPath,
+      cacheTtl: 100, // 100ms
+      maxCacheSize: 10
+    })
+
+    const firstResult = await shortTtlFetcher.fetchCrateJson("tinc")
+    expect(firstResult.fromCache).toBe(false)
+
+    // Wait for TTL to expire
+    await new Promise((resolve) => setTimeout(resolve, 150))
+
+    // Should fetch again as cache expired
+    const secondResult = await shortTtlFetcher.fetchCrateJson("tinc")
+    expect(secondResult.fromCache).toBe(false)
+
+    shortTtlFetcher.close()
+
+    // Clean up with delay on Windows
+    if (process.platform === "win32") {
+      await new Promise((resolve) => setTimeout(resolve, 100))
+    }
+    rmSync(ttlDbPath, { force: true })
+  }, 10000)
+})
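The `buildJsonUrl` tests in the next file repeat the same URL-capturing fetch stub three times. A sketch of how that boilerplate could be shared; `withUrlCapture` is hypothetical and not part of this diff:

```ts
// Hypothetical test helper: stub global.fetch with a 404, record the
// requested URL, and always restore the real fetch afterwards.
export const withUrlCapture = async (run: () => Promise<unknown>): Promise<string> => {
  const originalFetch = global.fetch
  let capturedUrl = ""
  global.fetch = ((input: string | URL | Request) => {
    capturedUrl =
      typeof input === "string" ? input : input instanceof URL ? input.toString() : input.url
    return Promise.resolve(new Response("{}", { status: 404 }))
  }) as unknown as typeof fetch
  try {
    await run().catch(() => {}) // the 404 is expected to throw upstream
  } finally {
    global.fetch = originalFetch
  }
  return capturedUrl
}
```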
diff --git a/test/integration/api/docs-fetcher.integration.test.ts b/test/integration/api/docs-fetcher.integration.test.ts
new file mode 100644
index 0000000..29cdee6
--- /dev/null
+++ b/test/integration/api/docs-fetcher.integration.test.ts
@@ -0,0 +1,238 @@
+import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"
+import { createDocsFetcher } from "../../../src/docs-fetcher.js"
+
+describe("DocsFetcher", () => {
+  let fetcher: ReturnType<typeof createDocsFetcher>
+
+  beforeEach(() => {
+    fetcher = createDocsFetcher({
+      cacheTtl: 1000,
+      maxCacheSize: 10,
+      requestTimeout: 5000
+    })
+  })
+
+  afterEach(() => {
+    fetcher.close()
+  })
+
+  describe("buildJsonUrl", () => {
+    it("should build correct URL with defaults", async () => {
+      // Mock fetch to capture the URL
+      const originalFetch = global.fetch
+      let capturedUrl = ""
+
+      global.fetch = mock((url: string | URL | Request) => {
+        if (typeof url === "string") {
+          capturedUrl = url
+        } else if (url instanceof URL) {
+          capturedUrl = url.toString()
+        } else {
+          capturedUrl = url.url
+        }
+        return Promise.resolve({
+          status: 404,
+          ok: false,
+          headers: new Headers(),
+          json: () => Promise.resolve({})
+        } as Response)
+      }) as unknown as typeof fetch
+
+      try {
+        await fetcher.fetchCrateJson("test-crate")
+      } catch {}
+
+      expect(capturedUrl).toBe("https://docs.rs/crate/test-crate/latest/json")
+
+      global.fetch = originalFetch
+    })
+
+    it("should build URL with version", async () => {
+      const originalFetch = global.fetch
+      let capturedUrl = ""
+
+      global.fetch = mock((url: string | URL | Request) => {
+        if (typeof url === "string") {
+          capturedUrl = url
+        } else if (url instanceof URL) {
+          capturedUrl = url.toString()
+        } else {
+          capturedUrl = url.url
+        }
+        return Promise.resolve({
+          status: 404,
+          ok: false,
+          headers: new Headers(),
+          json: () => Promise.resolve({})
+        } as Response)
+      }) as unknown as typeof fetch
+
+      try {
+        await fetcher.fetchCrateJson("test-crate", "1.0.0")
+      } catch {}
+
+      expect(capturedUrl).toBe("https://docs.rs/crate/test-crate/1.0.0/json")
+
+      global.fetch = originalFetch
+    })
+
+    it("should build URL with target", async () => {
+      const originalFetch = global.fetch
+      let capturedUrl = ""
+
+      global.fetch = mock((url: string | URL | Request) => {
+        if (typeof url === "string") {
+          capturedUrl = url
+        } else if (url instanceof URL) {
+          capturedUrl = url.toString()
+        } else {
+          capturedUrl = url.url
+        }
+        return Promise.resolve({
+          status: 404,
+          ok: false,
+          headers: new Headers(),
+          json: () => Promise.resolve({})
+        } as Response)
+      }) as unknown as typeof fetch
+
+      try {
+        await fetcher.fetchCrateJson("test-crate", "1.0.0", "x86_64-pc-windows-msvc")
+      } catch {}
+
+      expect(capturedUrl).toBe("https://docs.rs/crate/test-crate/1.0.0/x86_64-pc-windows-msvc/json")
+
+      global.fetch = originalFetch
+    })
+  })
+
+  describe("fetchCrateJson", () => {
+    it("should handle 404 errors", async () => {
+      const originalFetch = global.fetch
+
+      global.fetch = mock(() => {
+        return Promise.resolve({
+          status: 404,
+          ok: false,
+          headers: new Headers(),
+          json: () => Promise.resolve({})
+        } as Response)
+      }) as unknown as typeof fetch
+
+      await expect(fetcher.fetchCrateJson("non-existent-crate")).rejects.toThrow(
+        /Crate 'non-existent-crate' not found/
+      )
+
+      global.fetch = originalFetch
+    })
+
+    it("should handle timeouts", async () => {
+      const originalFetch = global.fetch
+
+      global.fetch = mock(() => {
+        return new Promise((_, reject) => {
+          setTimeout(() => {
+            const error = new Error("Aborted")
+            error.name = "AbortError"
+            reject(error)
+          }, 100)
+        })
+      }) as unknown as typeof fetch
+
+      const quickFetcher = createDocsFetcher({ requestTimeout: 50 })
+
+      await expect(quickFetcher.fetchCrateJson("test-crate")).rejects.toThrow(
+        /Request timeout after 50ms/
+      )
+
+      quickFetcher.close()
+      global.fetch = originalFetch
+    })
+
+    it("should cache successful responses", async () => {
+      const originalFetch = global.fetch
+      let fetchCount = 0
+      const testData = { test: "data", format_version: 30 }
+
+      global.fetch = mock(() => {
+        fetchCount++
+        return Promise.resolve({
+          status: 200,
+          ok: true,
+          headers: new Headers(),
+          json: () => Promise.resolve(testData),
+          text: () => Promise.resolve(JSON.stringify(testData)),
+          bodyUsed: false
+        } as Response)
+      }) as unknown as typeof fetch
+
+      // First call should fetch
+      const result1 = await fetcher.fetchCrateJson("test-crate")
+      expect(result1.data).toEqual(testData)
+      expect(result1.fromCache).toBe(false)
+      expect(fetchCount).toBe(1)
+
+      // Second call should use cache
+      const result2 = await fetcher.fetchCrateJson("test-crate")
+      expect(result2.data).toEqual(testData)
+      expect(result2.fromCache).toBe(true)
+      expect(fetchCount).toBe(1) // No additional fetch
+
+      global.fetch = originalFetch
+    })
+
+    it("should handle normal JSON responses", async () => {
+      const originalFetch = global.fetch
+      const testData = { test: "uncompressed", format_version: 30 }
+
+      global.fetch = mock(() => {
+        return Promise.resolve({
+          status: 200,
+          ok: true,
+          headers: new Headers(),
+          bodyUsed: false,
+          json: () => Promise.resolve(testData),
+          text: () => Promise.resolve(JSON.stringify(testData))
+        } as Response)
+      }) as unknown as typeof fetch
+
+      const result = await fetcher.fetchCrateJson("test-crate")
+      expect(result.data).toEqual(testData)
+      expect(result.fromCache).toBe(false)
+
+      global.fetch = originalFetch
+    })
+  })
+
+  describe("cache operations", () => {
+    it("should clear cache", async () => {
+      const originalFetch = global.fetch
+      let fetchCount = 0
+      const testData = { test: "data" }
+
+      global.fetch = mock(() => {
+        fetchCount++
+        return Promise.resolve({
+          status: 200,
+          ok: true,
+          headers: new Headers(),
+          json: () => Promise.resolve(testData),
+          text: () => Promise.resolve(JSON.stringify(testData))
+        } as Response)
+      }) as unknown as typeof fetch
+
+      // First fetch
+      await fetcher.fetchCrateJson("test-crate")
+      expect(fetchCount).toBe(1)
+
+      // Clear cache
+      fetcher.clearCache()
+
+      // Should fetch again
+      await fetcher.fetchCrateJson("test-crate")
+      expect(fetchCount).toBe(2)
+
+      global.fetch = originalFetch
+    })
+  })
+})
diff --git a/test/integration/api/search-crates.integration.test.ts b/test/integration/api/search-crates.integration.test.ts
new file mode 100644
index 0000000..d9c5ba2
--- /dev/null
+++ b/test/integration/api/search-crates.integration.test.ts
@@ -0,0 +1,152 @@
+import { describe, expect, test } from "bun:test"
+import {
+  createSearchCratesHandler,
+  suggestSimilarCrates
+} from "../../../src/tools/search-crates.js"
+
+describe("Search Crates Tool", () => {
+  const searchHandler = createSearchCratesHandler()
+
+  test("should search for crates with partial name match", async () => {
+    const result = await searchHandler({
+      query: "tinc",
+      limit: 5
+    })
+
+    // Check if there was an error (network issues, etc)
+    if (result.isError) {
+      console.log("Search failed with error:", result.content[0].text)
+      // Skip the test if network is unavailable
+      return
+    }
+
+    expect(result.content).toHaveLength(1)
+    expect(result.content[0].type).toBe("text")
+
+    const text = result.content[0].text
+    expect(text).toContain("tinc")
+    expect(text).toContain("Downloads:")
+    expect(text).toMatch(/Found \d+ crates matching "tinc"/)
+  }, 10000)
+
+  test("should handle no results gracefully", async () => {
+    const result = await searchHandler({
+      query: "this-crate-definitely-does-not-exist-12345",
+      limit: 5
+    })
+
+    // Check if there was an error (network issues, etc)
+    if (result.isError) {
+      console.log("Search failed with error:", result.content[0].text)
+      // Skip the test if network is unavailable
+      return
+    }
+
+    expect(result.content[0].text).toContain(
+      'No crates found matching "this-crate-definitely-does-not-exist-12345"'
+    )
+  }, 10000)
+
+  test("should respect limit parameter", async () => {
+    const result = await searchHandler({
+      query: "test",
+      limit: 3
+    })
+
+    // Check if there was an error (network issues, etc)
+    if (result.isError) {
+      console.log("Search failed with error:", result.content[0].text)
+      // Skip the test if network is unavailable
+      return
+    }
+
+    const text = result.content[0].text
+    const matches = text.match(/^\d+\./gm)
+
+    // Should have at most 3 results
+    if (matches) {
+      expect(matches.length).toBeLessThanOrEqual(3)
+    }
+  }, 10000)
+
+  test("should format results correctly", async () => {
+    const result = await searchHandler({
+      query: "tokio",
+      limit: 1
+    })
+
+    // Skip test if there's a network error
+    if (result.isError) {
+      console.log("Search failed with error:", result.content[0].text)
+      return
+    }
+
+    const text = result.content[0].text
+
+    // Check for expected formatting
+    expect(text).toMatch(/1\. \*\*tokio\*\* v\d+\.\d+\.\d+/)
+    expect(text).toContain("Downloads:")
+    expect(text).toContain("recent)")
+  }, 10000)
+})
+
+describe("Suggest Similar Crates", () => {
+  test("should suggest similar crates for typos", async () => {
+    try {
+      const suggestions = await suggestSimilarCrates("cla", 5)
+
+      expect(Array.isArray(suggestions)).toBe(true)
+      // Network might be down, so we just check it returns an array
+      if (suggestions.length > 0) {
+        // Should likely include something with "cla" in it (like "clap")
+        expect(suggestions.some((s) => s.includes("cla"))).toBe(true)
+      }
+    } catch (error) {
+      console.log("Network error during suggestion test:", error)
+      // Return empty array on network error
+      expect(true).toBe(true) // Pass the test on network error
+    }
+  }, 10000)
+
+  test("should return empty array for non-existent crates", async () => {
+    try {
+      const suggestions = await suggestSimilarCrates("zzzzz-definitely-not-a-crate-99999", 5)
+
+      expect(Array.isArray(suggestions)).toBe(true)
+      // Might return empty or might return unrelated crates
+      expect(suggestions.length).toBeGreaterThanOrEqual(0)
+    } catch (error) {
+      console.log("Network error during suggestion test:", error)
+      // Should still return array on error
+      expect(true).toBe(true)
+    }
+  }, 10000)
+
+  test("should respect limit parameter", async () => {
+    try {
+      const suggestions = await suggestSimilarCrates("async", 3)
+
+      expect(Array.isArray(suggestions)).toBe(true)
+      expect(suggestions.length).toBeLessThanOrEqual(3)
+    } catch (error) {
+      console.log("Network error during suggestion test:", error)
+      expect(true).toBe(true)
+    }
+  }, 10000)
+
+  test("should handle network errors gracefully", async () => {
+    // Mock a failed fetch by using an invalid crate name with special characters
+    // that might cause issues
+    try {
+      const suggestions = await suggestSimilarCrates("", 5)
+
+      expect(Array.isArray(suggestions)).toBe(true)
+      // Should return empty array on error
+      expect(suggestions.length).toBeGreaterThanOrEqual(0)
+    } catch (error) {
+      // This is expected behavior - we handle errors gracefully
+      console.log("Expected error handled:", error)
+      expect(true).toBe(true)
+    }
+  }, 10000)
+})
diff --git a/test/integration/e2e/full-flow.integration.test.ts b/test/integration/e2e/full-flow.integration.test.ts
new file mode 100644
index 0000000..7b68de1
--- /dev/null
+++ b/test/integration/e2e/full-flow.integration.test.ts
@@ -0,0 +1,74 @@
+import { afterEach, beforeEach, describe, expect, it } from "bun:test"
+import { createDocsFetcher } from "../../../src/docs-fetcher.js"
+import { findItem, parseCrateInfo } from "../../../src/rustdoc-parser.js"
+
+describe("Integration Tests", () => {
+  let fetcher: ReturnType<typeof createDocsFetcher>
+
+  beforeEach(() => {
+    fetcher = createDocsFetcher({
+      cacheTtl: 60000,
+      requestTimeout: 30000
+    })
+  })
+
+  afterEach(() => {
+    fetcher.close()
+  })
+
+  it("should fetch and parse a real crate (tinc)", async () => {
+    try {
+      // Try to fetch tinc which should have JSON docs
+      const { data: json } = await fetcher.fetchCrateJson("tinc", "0.1.6")
+      expect(json).toBeDefined()
+      expect(json.format_version).toBeGreaterThanOrEqual(30)
+      expect(json.index).toBeDefined()
+      expect(json.paths).toBeDefined()
+
+      // Parse the crate info
+      const info = parseCrateInfo(json)
+      expect(info).toContain("tinc")
+      expect(info).toContain("JSON")
+
+      // Try to find a common item
+      const valueItem = findItem(json, "Value")
+      if (valueItem) {
+        expect(valueItem).toContain("Value")
+      }
+    } catch (error: any) {
+      // If it fails with 404, that's expected for older versions
+      if (error.message.includes("not found") || error.message.includes("404")) {
+        console.log("Note: This crate version may not have JSON docs available yet")
+      } else {
+        throw error
+      }
+    }
+  }, 10000)
+
+  it("should handle crates without JSON docs gracefully", async () => {
+    // Try an old version that likely doesn't have JSON docs
+    await expect(fetcher.fetchCrateJson("tinc", "0.1.0")).rejects.toThrow(/not found|404/)
+  }, 10000)
+
+  it("should validate URL construction", async () => {
+    // Mock a quick test without actual network call
+    const originalFetch = global.fetch
+    let capturedUrl = ""
+
+    global.fetch = ((url: string | URL | Request) => {
+      capturedUrl = url.toString()
+      throw new Error("Test interception")
+    }) as unknown as typeof fetch
+
+    try {
+      await fetcher.fetchCrateJson("test-crate", "1.2.3", "wasm32-unknown-unknown", 30)
+    } catch {}
+
+    expect(capturedUrl).toBe(
+      "https://docs.rs/crate/test-crate/1.2.3/wasm32-unknown-unknown/json/30"
+    )
+
+    global.fetch = originalFetch
+  }, 10000)
+})
diff --git a/test/integration/persistence/persistent-cache.integration.test.ts b/test/integration/persistence/persistent-cache.integration.test.ts
new file mode 100644
index 0000000..a0ff042
--- /dev/null
+++ b/test/integration/persistence/persistent-cache.integration.test.ts
@@ -0,0 +1,110 @@
+import { afterEach, describe, expect, it } from "bun:test"
+import { existsSync, rmSync } from "node:fs"
+import { tmpdir } from "node:os"
+import { join } from "node:path"
+import { createCache } from "../../../src/cache.js"
+
+describe("Persistent Cache", () => {
+  const testDbPath = join(tmpdir(), `test-cache-${Date.now()}.db`)
+
+  afterEach(async () => {
+    // Add a small delay on Windows to ensure file handles are released
+    if (process.platform === "win32") {
+      await new Promise((resolve) => setTimeout(resolve, 100))
+    }
+
+    // Clean up test database
+    if (existsSync(testDbPath)) {
+      try {
+        rmSync(testDbPath, { force: true })
+      } catch (error) {
+        // If still locked, try again after a longer delay
+        if ((error as any).code === "EBUSY" && process.platform === "win32") {
+          await new Promise((resolve) => setTimeout(resolve, 500))
+          rmSync(testDbPath, { force: true })
+        } else {
+          throw error
+        }
+      }
+    }
+  })
+
+  it("should create database file when dbPath is provided", () => {
+    const cache = createCache(10, testDbPath)
+
+    // Set a value
+    cache.set("test-key", { data: "test-value" }, 3600000)
+
+    // Check that the database file was created
+    expect(existsSync(testDbPath)).toBe(true)
+
+    cache.close()
+  })
+
+  it("should persist data across cache instances", () => {
+    // Create first cache instance and set data
+    const cache1 = createCache(10, testDbPath)
+    cache1.set("persistent-key", { value: "persistent-data" }, 3600000)
+    cache1.close()
+
+    // Create second cache instance and verify data persists
+    const cache2 = createCache(10, testDbPath)
+    const retrieved = cache2.get("persistent-key")
+
+    expect(retrieved).toEqual({ value: "persistent-data" })
+
+    cache2.close()
+  })
+
+  it("should handle nested directory creation", async () => {
+    const nestedPath = join(tmpdir(), "nested", "dirs", `test-cache-${Date.now()}.db`)
+
+    const cache = createCache(10, nestedPath)
+    cache.set("test", { data: "value" }, 3600000)
+
+    expect(existsSync(nestedPath)).toBe(true)
+
+    cache.close()
+
+    // Add delay on Windows before cleanup
+    if (process.platform === "win32") {
+      await new Promise((resolve) => setTimeout(resolve, 100))
+    }
+
+    // Clean up nested directories
+    try {
+      rmSync(join(tmpdir(), "nested"), { recursive: true, force: true })
+    } catch (error) {
+      if ((error as any).code === "EBUSY" && process.platform === "win32") {
+        await new Promise((resolve) => setTimeout(resolve, 500))
+        rmSync(join(tmpdir(), "nested"), { recursive: true, force: true })
+      } else {
+        throw error
+      }
+    }
+  })
+
+  it("should use in-memory database when dbPath is not provided", () => {
+    const cache = createCache(10)
+
+    // Should work normally
+    cache.set("memory-key", { data: "memory-value" }, 3600000)
+    const retrieved = cache.get("memory-key")
+
+    expect(retrieved).toEqual({ data: "memory-value" })
+
+    cache.close()
+  })
+
+  it("should use in-memory database when dbPath is ':memory:'", () => {
+    const cache = createCache(10, ":memory:")
+
+    // Should work normally
+    cache.set("memory-key", { data: "memory-value" }, 3600000)
+    const retrieved = cache.get("memory-key")
+
+    expect(retrieved).toEqual({ data: "memory-value" })
+
+    cache.close()
+  })
+})
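The cache surface these tests exercise, condensed into one usage sketch (signatures as used by the tests; the import path is illustrative):

```ts
import { tmpdir } from "node:os"
import { join } from "node:path"
import { createCache } from "../../src/cache.js"

// createCache(maxCacheSize, dbPath?): omit dbPath or pass ":memory:" for an
// in-memory database; a file path makes entries survive across instances.
const cache = createCache(10, join(tmpdir(), "example-cache.db"))
cache.set("docs:serde", { data: "example" }, 3_600_000) // value plus TTL in ms
console.log(cache.get("docs:serde")) // { data: "example" } until the TTL lapses
cache.close() // release the database handle so the file can be deleted
```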
diff --git a/test/integration/test-binary.ts b/test/integration/test-binary.ts
new file mode 100644
index 0000000..e2acd4a
--- /dev/null
+++ b/test/integration/test-binary.ts
@@ -0,0 +1,261 @@
+#!/usr/bin/env bun
+import { spawn } from "node:child_process"
+import { promises as fs } from "node:fs"
+import os from "node:os"
+import path from "node:path"
+import { runCratesSearchTests } from "./test-crates-search"
+import { runMCPProtocolTests } from "./test-mcp-protocol"
+import { runPersistentCacheTests } from "./test-persistent-cache"
+import { runResourcesTests } from "./test-resources"
+import { runZstdTests } from "./test-zstd"
+
+type TestOptions = {
+  executable: string
+  target: string
+  isWindows?: boolean
+}
+
+type CommandResult = {
+  success: boolean
+  output: string
+  error?: string
+}
+
+const runCommand = (command: string[]): Promise<CommandResult> => {
+  return new Promise((resolve) => {
+    const [cmd, ...args] = command
+    const proc = spawn(cmd, args, {
+      stdio: ["pipe", "pipe", "pipe"]
+    })
+
+    let output = ""
+    let error = ""
+
+    proc.stdout.on("data", (data) => {
+      output += data.toString()
+    })
+
+    proc.stderr.on("data", (data) => {
+      error += data.toString()
+    })
+
+    proc.on("exit", (code) => {
+      resolve({
+        success: code === 0,
+        output: output.trim(),
+        error: error.trim()
+      })
+    })
+
+    proc.on("error", (err) => {
+      resolve({
+        success: false,
+        output: "",
+        error: err.message
+      })
+    })
+  })
+}
+
+const testVersion = async (options: TestOptions): Promise<void> => {
+  console.log("\n🔍 Testing --version flag...")
+
+  const result = await runCommand([options.executable, "--version"])
+
+  if (!result.success) {
+    throw new Error(`Version test failed: ${result.error}`)
+  }
+
+  console.log(`✅ Version test passed: ${result.output}`)
+}
+
+const testServerStartup = async (options: TestOptions): Promise<void> => {
+  console.log("\n🚀 Testing server startup...")
+
+  const server = spawn(options.executable, [], {
+    env: { ...process.env, DB_PATH: ":memory:", MCP_TEST: "true" },
+    stdio: ["pipe", "pipe", "pipe"]
+  })
+
+  let errorOutput = ""
+
+  server.stdout.on("data", () => {
+    // We don't need to capture output for this test
+  })
+
+  server.stderr.on("data", (data) => {
+    errorOutput += data.toString()
+  })
+
+  // Give server time to start
+  await new Promise((resolve) => setTimeout(resolve, 2000))
+
+  // Check if server is still running
+  if (server.exitCode !== null) {
+    throw new Error(
+      `Server exited unexpectedly with code ${server.exitCode}\nError: ${errorOutput}`
+    )
+  }
+
+  console.log("✅ Server started successfully")
+
+  // Clean shutdown
+  server.kill("SIGTERM")
+
+  // Wait for graceful shutdown
+  await new Promise((resolve) => {
+    server.on("exit", resolve)
+    setTimeout(resolve, 5000) // Timeout after 5 seconds
+  })
+
+  console.log("✅ Server shutdown cleanly")
+}
+
+const testCacheFunctionality = async (): Promise<void> => {
+  console.log("\n💾 Testing cache functionality...")
+
+  const cacheDir = path.join(os.homedir(), ".mcp-docsrs")
+
+  try {
+    await fs.access(cacheDir)
+    console.log("✅ Cache directory exists or will be created on first use")
+  } catch {
+    console.log("ℹ️ Cache directory will be created on first use")
+  }
+}
+
+const testBasicMCPOperations = async (options: TestOptions): Promise<void> => {
+  console.log("\n🔧 Testing basic MCP operations...")
+
+  // Test with in-memory database to avoid file system issues
+  const server = spawn(options.executable, [], {
+    env: { ...process.env, DB_PATH: ":memory:", MCP_TEST: "true" },
+    stdio: ["pipe", "pipe", "pipe"]
+  })
+
+  // Send a basic MCP request
+  const testRequest = `${JSON.stringify({
+    jsonrpc: "2.0",
+    id: 1,
+    method: "initialize",
+    params: {
+      protocolVersion: "2024-11-05",
+      capabilities: {},
+      clientInfo: {
+        name: "test-client",
+        version: "1.0.0"
+      }
+    }
+  })}\n`
+
+  let response = ""
+  let responseReceived = false
+
+  server.stdout.on("data", (data) => {
+    response += data.toString()
+    responseReceived = true
+  })
+
+  server.stderr.on("data", (data) => {
+    console.error("Server error:", data.toString())
+  })
+
+  // Give server time to initialize
+  await new Promise((resolve) => setTimeout(resolve, 1000))
+
+  // Send test request
+  server.stdin.write(testRequest)
+
+  // Wait for response
+  await new Promise((resolve) => {
+    const checkInterval = setInterval(() => {
+      if (responseReceived) {
+        clearInterval(checkInterval)
+        resolve(undefined)
+      }
+    }, 100)
+
+    // Timeout after 5 seconds
+    setTimeout(() => {
+      clearInterval(checkInterval)
+      resolve(undefined)
+    }, 5000)
+  })
+
+  if (!responseReceived) {
+    server.kill("SIGTERM")
+    throw new Error("No response received from MCP server")
+  }
+
+  // Parse response
+  try {
+    const lines = response.trim().split("\n")
+    const jsonResponse = JSON.parse(lines[0])
+
+    if (jsonResponse.result?.protocolVersion) {
+      console.log(
+        `✅ MCP initialization successful, protocol version: ${jsonResponse.result.protocolVersion}`
+      )
+    } else {
+      throw new Error("Invalid MCP response")
+    }
+  } catch (error) {
+    server.kill("SIGTERM")
+    throw new Error(`Failed to parse MCP response: ${error}`)
+  }
+
+  // Clean shutdown
+  server.kill("SIGTERM")
+  await new Promise((resolve) => setTimeout(resolve, 1000))
+
+  console.log("✅ Basic MCP operations test passed")
+}
+
+const runTests = async (options: TestOptions): Promise<void> => {
+  console.log(`\n🧪 Running integration tests for ${options.target}`)
+  console.log(`📦 Executable: ${options.executable}`)
+
+  // Basic tests
+  await testVersion(options)
+  await testServerStartup(options)
+  await testCacheFunctionality()
+  await testBasicMCPOperations(options)
+
+  // Extended tests for all platforms
+  console.log("\n📋 Running extended integration tests...")
+
+  // Run additional test suites
+  await runCratesSearchTests(options)
+  await runMCPProtocolTests(options)
+  await runPersistentCacheTests(options)
+  await runResourcesTests(options)
+  await runZstdTests(options)
+
+  console.log(`\n✅ All integration tests passed for ${options.target}`)
+}
+
+// Main execution
+if (import.meta.main) {
+  const args = process.argv.slice(2)
+
+  if (args.length < 2) {
+    console.error("Usage: bun test/integration/test-binary.ts <executable> <target> [--windows]")
+    process.exit(1)
+  }
+
+  const [executable, target] = args
+  const isWindows = args.includes("--windows")
+
+  const options: TestOptions = {
+    executable,
+    target,
+    isWindows
+  }
+
+  try {
+    await runTests(options)
+  } catch (error) {
+    console.error(`\n❌ Integration tests failed: ${error}`)
+    process.exit(1)
+  }
+}
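Each sub-suite below also has its own `import.meta.main` entry point, so a single area can be rerun without the whole orchestrator:

```bash
bun test/integration/test-crates-search.ts ./dist/mcp-docsrs-linux-x64 linux-x64
bun test/integration/test-mcp-protocol.ts ./dist/mcp-docsrs-linux-x64 linux-x64
bun test/integration/test-resources.ts ./dist/mcp-docsrs-linux-x64 linux-x64
```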
diff --git a/test/integration/test-crates-search.ts b/test/integration/test-crates-search.ts
new file mode 100644
index 0000000..304b1c1
--- /dev/null
+++ b/test/integration/test-crates-search.ts
@@ -0,0 +1,100 @@
+#!/usr/bin/env bun
+import { assertContains, callTool, type TestOptions, withMCPServer } from "./utils"
+
+const testCratesSearch = async (options: TestOptions): Promise<void> => {
+  console.log("\n🔍 Testing crates search functionality...")
+
+  await withMCPServer(options.executable, async (server) => {
+    // Test 1: Search for popular crate
+    console.log("\n📦 Test 1: Searching for 'tinc'...")
+    const tincResponse = await callTool(
+      server,
+      "search_crates",
+      {
+        query: "tinc",
+        limit: 5
+      },
+      2
+    )
+
+    const tincText = tincResponse.result?.content?.[0]?.text || ""
+    assertContains(tincText, "tinc", "Should find tinc crate")
+    console.log("✅ Found tinc crate")
+
+    // Test 2: Partial match search
+    console.log("\n📦 Test 2: Partial match search for 'tokio'...")
+    const partialResponse = await callTool(
+      server,
+      "search_crates",
+      {
+        query: "tokio",
+        limit: 10
+      },
+      3
+    )
+
+    const partialText = partialResponse.result?.content?.[0]?.text || ""
+    assertContains(partialText, "tokio", "Should find tokio crate")
+    console.log("✅ Found tokio crate")
+
+    // Test 3: Non-existent crate
+    console.log("\n📦 Test 3: Searching for non-existent crate...")
+    const nonExistentResponse = await callTool(
+      server,
+      "search_crates",
+      {
+        query: "this-crate-definitely-does-not-exist-12345",
+        limit: 5
+      },
+      4
+    )
+
+    const nonExistentText = nonExistentResponse.result?.content?.[0]?.text || ""
+    assertContains(nonExistentText, "No crates found", "Should report no crates found")
+    console.log("✅ Correctly handled non-existent crate")
+
+    // Test 4: Special characters
+    console.log("\n📦 Test 4: Searching with special characters...")
+    const specialResponse = await callTool(
+      server,
+      "search_crates",
+      {
+        query: "clap-derive",
+        limit: 5
+      },
+      5
+    )
+
+    const specialText = specialResponse.result?.content?.[0]?.text || ""
+    if (!specialText.includes("clap_derive") && !specialText.includes("clap-derive")) {
+      throw new Error("Should handle hyphenated names")
+    }
+    console.log("✅ Special characters handled correctly")
+
+    console.log("\n✅ All crates search tests passed")
+  })
+}
+
+export const runCratesSearchTests = async (options: TestOptions): Promise<void> => {
+  await testCratesSearch(options)
+}
+
+// Main execution
+if (import.meta.main) {
+  const args = process.argv.slice(2)
+
+  if (args.length < 2) {
+    console.error("Usage: bun test/integration/test-crates-search.ts <executable> <target>")
+    process.exit(1)
+  }
+
+  const [executable, target] = args
+  const options: TestOptions = { executable, target }
+
+  try {
+    await runCratesSearchTests(options)
+  } catch (error) {
+    console.error(`\n❌ Crates search tests failed: ${error}`)
+    process.exit(1)
+  }
+}
diff --git a/test/integration/test-mcp-protocol.ts b/test/integration/test-mcp-protocol.ts
new file mode 100644
index 0000000..7904f06
--- /dev/null
+++ b/test/integration/test-mcp-protocol.ts
@@ -0,0 +1,111 @@
+#!/usr/bin/env bun
+import {
+  assertContains,
+  assertError,
+  assertSuccess,
+  callTool,
+  createMCPServer,
+  initializeServer,
+  listTools,
+  type TestOptions
+} from "./utils"
+
+const testMCPProtocol = async (options: TestOptions): Promise<void> => {
+  console.log("\n🔧 Testing MCP protocol implementation...")
+
+  const server = createMCPServer(options.executable)
+
+  try {
+    // Test 1: Initialize
+    console.log("\n📡 Test 1: MCP Initialize...")
+    await initializeServer(server)
+    console.log("✅ Initialized successfully")
+
+    // Test 2: List tools
+    console.log("\n🛠️ Test 2: List available tools...")
+    const tools = await listTools(server, 2)
+
+    const expectedTools = ["lookup_crate_docs", "lookup_item_docs", "search_crates"]
+    for (const toolName of expectedTools) {
+      if (!tools.includes(toolName)) {
+        throw new Error(`Missing expected tool: ${toolName}`)
+      }
+    }
+    console.log(`✅ Found all ${tools.length} expected tools`)
+
+    // Test 3: Tool invocation - lookup_crate_docs
+    console.log("\n📚 Test 3: Tool invocation - lookup_crate_docs...")
+    const lookupResponse = await callTool(
+      server,
+      "lookup_crate_docs",
+      {
+        crateName: "clap"
+      },
+      3
+    )
+
+    assertSuccess(lookupResponse, "Failed to lookup crate documentation")
+    const docText = lookupResponse.result.content[0].text
+    assertContains(docText, "clap", "Documentation should contain crate name")
+    console.log("✅ Successfully retrieved crate documentation")
+
+    // Test 4: Error handling - invalid tool
+    console.log("\n❌ Test 4: Error handling - invalid tool...")
+    const invalidToolResponse = await server.sendRequest({
+      jsonrpc: "2.0",
+      id: 4,
+      method: "tools/call",
+      params: {
+        name: "invalid_tool_name",
+        arguments: {}
+      }
+    })
+
+    assertError(invalidToolResponse, "Should have returned an error for invalid tool")
+    console.log("✅ Correctly handled invalid tool request")
+
+    // Test 5: Error handling - missing arguments
+    console.log("\n❌ Test 5: Error handling - missing arguments...")
+    const missingArgsResponse = await server.sendRequest({
+      jsonrpc: "2.0",
+      id: 5,
+      method: "tools/call",
+      params: {
+        name: "lookup_crate_docs",
+        arguments: {}
+      }
+    })
+
+    assertError(missingArgsResponse, "Should have returned an error for missing arguments")
+    console.log("✅ Correctly handled missing arguments")
+
+    console.log("\n✅ All MCP protocol tests passed")
+  } finally {
+    server.kill()
+    await new Promise((resolve) => setTimeout(resolve, 1000))
+  }
+}
+
+export const runMCPProtocolTests = async (options: TestOptions): Promise<void> => {
+  await testMCPProtocol(options)
+}
+
+// Main execution
+if (import.meta.main) {
+  const args = process.argv.slice(2)
+
+  if (args.length < 2) {
+    console.error("Usage: bun test/integration/test-mcp-protocol.ts <executable> <target>")
+    process.exit(1)
+  }
+
+  const [executable, target] = args
+  const options: TestOptions = { executable, target }
+
+  try {
+    await runMCPProtocolTests(options)
+  } catch (error) {
+    console.error(`\n❌ MCP protocol tests failed: ${error}`)
+    process.exit(1)
+  }
+}
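For reference, one request/response pair as the protocol tests above drive it, newline-delimited JSON-RPC over stdio; the response text is illustrative, but the envelope matches `DocsResponse` from `src/types.ts`:

```ts
// Request written to the server's stdin (one JSON object per line)
const request = {
  jsonrpc: "2.0",
  id: 3,
  method: "tools/call",
  params: { name: "lookup_crate_docs", arguments: { crateName: "clap" } }
}

// Expected success envelope on stdout (text abridged for illustration)
const response = {
  jsonrpc: "2.0",
  id: 3,
  result: { content: [{ type: "text", text: "# clap\n..." }] }
}
```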
diff --git a/test/integration/test-persistent-cache.ts b/test/integration/test-persistent-cache.ts
new file mode 100644
index 0000000..c03ce80
--- /dev/null
+++ b/test/integration/test-persistent-cache.ts
@@ -0,0 +1,173 @@
+#!/usr/bin/env bun
+import { promises as fs } from "node:fs"
+import path from "node:path"
+import {
+  assertSuccess,
+  callTool,
+  createMCPServer,
+  initializeServer,
+  type TestOptions,
+  withTempDir
+} from "./utils.js"
+
+const testPersistentCache = async (options: TestOptions): Promise<void> => {
+  console.log("\n💾 Testing persistent cache functionality...")
+
+  await withTempDir("mcp-docsrs-test-", async (tempDir) => {
+    const cacheDbPath = path.join(tempDir, "test-cache.db")
+    const env = { DB_PATH: cacheDbPath }
+
+    // Test 1: First fetch (cache miss)
+    console.log("\n🔍 Test 1: First fetch - cache miss...")
+    const server1 = createMCPServer(options.executable, env)
+
+    try {
+      await initializeServer(server1)
+
+      const startTime1 = Date.now()
+      const response1 = await callTool(server1, "lookup_crate_docs", {
+        crateName: "lazy_static",
+        version: "1.4.0"
+      })
+      const fetchTime1 = Date.now() - startTime1
+
+      assertSuccess(response1)
+      if (!response1.result?.content?.[0]?.text) {
+        throw new Error("Failed to fetch crate documentation")
+      }
+
+      console.log(`✅ First fetch completed in ${fetchTime1}ms (cache miss)`)
+
+      // Shutdown first server
+      server1.kill()
+      await new Promise((resolve) => setTimeout(resolve, 1000))
+
+      // Test 2: Second fetch with new server instance (cache hit)
+      console.log("\n🚀 Test 2: Second fetch - cache hit from persistent storage...")
+      const server2 = createMCPServer(options.executable, env)
+
+      await initializeServer(server2)
+
+      const startTime2 = Date.now()
+      const response2 = await callTool(server2, "lookup_crate_docs", {
+        crateName: "lazy_static",
+        version: "1.4.0"
+      })
+      const fetchTime2 = Date.now() - startTime2
+
+      assertSuccess(response2)
+      if (!response2.result?.content?.[0]?.text) {
+        throw new Error("Failed to fetch cached documentation")
+      }
+
+      console.log(`✅ Second fetch completed in ${fetchTime2}ms (cache hit)`)
+
+      // Cache hit should be significantly faster
+      if (fetchTime2 > fetchTime1 * 0.5) {
+        console.warn(
+          `⚠️ Cache hit (${fetchTime2}ms) wasn't significantly faster than miss (${fetchTime1}ms)`
+        )
+      }
+
+      // Test 3: Verify cache database exists
+      console.log("\n📁 Test 3: Verify cache database exists...")
+      try {
+        const stats = await fs.stat(cacheDbPath)
+        if (!stats.isFile()) {
+          throw new Error("Cache database is not a file")
+        }
+        console.log(`✅ Cache database exists: ${(stats.size / 1024).toFixed(2)}KB`)
+      } catch (error) {
+        throw new Error(`Cache database not found: ${error}`)
+      }
+
+      // Test 4: Multiple crates in cache
+      console.log("\n📦 Test 4: Multiple crates in cache...")
+      const response3 = await callTool(
+        server2,
+        "lookup_crate_docs",
+        {
+          crateName: "once_cell"
+        },
+        3
+      )
+
+      assertSuccess(response3)
+      if (!response3.result?.content?.[0]?.text) {
+        throw new Error("Failed to fetch second crate")
+      }
+      console.log("✅ Successfully cached multiple crates")
+
+      // Shutdown second server
+      server2.kill()
+      await new Promise((resolve) => setTimeout(resolve, 1000))
+
+      // Test 5: Verify both crates are in cache with third instance
+      console.log("\n🔄 Test 5: Verify persistence across multiple restarts...")
+      const server3 = createMCPServer(options.executable, env)
+
+      await initializeServer(server3)
+
+      // Check first crate
+      const startVerify1 = Date.now()
+      await callTool(server3, "lookup_crate_docs", {
+        crateName: "lazy_static",
+        version: "1.4.0"
+      })
+      const verifyTime1 = Date.now() - startVerify1
+      console.log(`✅ lazy_static retrieved in ${verifyTime1}ms`)
+
+      // Check second crate
+      const startVerify2 = Date.now()
+      await callTool(
+        server3,
+        "lookup_crate_docs",
+        {
+          crateName: "once_cell"
+        },
+        3
+      )
+      const verifyTime2 = Date.now() - startVerify2
+      console.log(`✅ once_cell retrieved in ${verifyTime2}ms`)
+
+      server3.kill()
+      await new Promise((resolve) => setTimeout(resolve, 1000))
+    } finally {
+      // Ensure server1 is cleaned up in case of early failure
+      try {
+        server1.kill()
+      } catch {
+        // Ignore if already killed
+      }
+    }
+
+    console.log("\n✅ All persistent cache tests passed")
+  })
+}
+
+export const runPersistentCacheTests = async (options: TestOptions): Promise<void> => {
+  await testPersistentCache(options)
+}
+
+// Main execution
+if (import.meta.main) {
+  const args = process.argv.slice(2)
+
+  if (args.length < 2) {
+    console.error("Usage: bun test/integration/test-persistent-cache.ts <executable> <target>")
+    process.exit(1)
+  }
+
+  const [executable, target] = args
+  const options: TestOptions = {
+    executable,
+    target
+  }
+
+  try {
+    await runPersistentCacheTests(options)
+  } catch (error) {
+    console.error(`\n❌ Persistent cache tests failed: ${error}`)
+    process.exit(1)
+  }
+}
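The next file reads the server's cache over MCP resources. The three URI forms it exercises, plus the read request shape, summarized here from the tests themselves:

```ts
// URIs exercised by test-resources.ts (the query form accepts SELECT only)
const uris = [
  "cache://stats", // JSON: { totalEntries, totalSize, oldestEntry }
  "cache://entries?limit=10&offset=0", // paginated cache entries
  "cache://query?sql=SELECT COUNT(*) as count FROM cache" // read-only SQL
]

const readRequest = {
  jsonrpc: "2.0",
  id: 4,
  method: "resources/read",
  params: { uri: uris[0] }
}
```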
diff --git a/test/integration/test-resources.ts b/test/integration/test-resources.ts
new file mode 100644
index 0000000..41e2d1b
--- /dev/null
+++ b/test/integration/test-resources.ts
@@ -0,0 +1,182 @@
+#!/usr/bin/env bun
+import {
+  assertContains,
+  callTool,
+  listResources,
+  readResource,
+  type TestOptions,
+  withMCPServer,
+  withTempDir
+} from "./utils"
+
+const testResources = async (options: TestOptions): Promise<void> => {
+  console.log("\n🗂️ Testing MCP resources functionality...")
+
+  await withTempDir("mcp-docsrs-resources-test-", async (tempDir) => {
+    const cacheDbPath = tempDir
+
+    await withMCPServer(
+      options.executable,
+      async (server) => {
+        // Test 1: List resources when cache is empty
+        console.log("\n📋 Test 1: List resources with empty cache...")
+        const emptyResources = await listResources(server)
+
+        // Should have cache statistics and cache entries resources
+        const expectedResourceNames = ["Cache Statistics", "Cache Entries"]
+        for (const name of expectedResourceNames) {
+          if (!emptyResources.some((r: any) => r.name === name)) {
+            throw new Error(`Missing expected resource: ${name}`)
+          }
+        }
+        console.log(`✅ Found ${emptyResources.length} resources with empty cache`)
+
+        // Test 2: Add some data to cache
+        console.log("\n🔄 Test 2: Populate cache with test data...")
+        const addResult = await callTool(
+          server,
+          "lookup_crate_docs",
+          { crateName: "tinc", version: "0.1.6" },
+          3
+        )
+        if (addResult.result?.isError) {
+          throw new Error(`Failed to add tinc to cache: ${addResult.result.content[0].text}`)
+        }
+        console.log("✅ Added tinc to cache")
+
+        // Small delay to ensure cache write completes
+        await new Promise((resolve) => setTimeout(resolve, 100))
+
+        // Test 3: Read cache statistics
+        console.log("\n📊 Test 3: Read cache statistics...")
+        const statsContent = await readResource(server, "cache://stats", 4)
+
+        // Parse statistics
+        const stats = JSON.parse(statsContent)
+        if (stats.totalEntries !== 1) {
+          throw new Error(`Expected 1 cache entry, got ${stats.totalEntries}`)
+        }
+        console.log(
+          `✅ Cache statistics: ${stats.totalEntries} entries, ${(stats.totalSize / 1024).toFixed(2)}KB`
+        )
+
+        // Test 4: Read cache entries
+        console.log("\n📚 Test 4: Read cache entries...")
+        const entriesContent = await readResource(server, "cache://entries?limit=10&offset=0", 5)
+
+        if (!entriesContent) {
+          throw new Error("No content returned from cache entries resource")
+        }
+
+				// Test 4: Read cache entries
+				console.log("\n📚 Test 4: Read cache entries...")
+				const entriesContent = await readResource(server, "cache://entries?limit=10&offset=0", 5)
+
+				if (!entriesContent) {
+					throw new Error("No content returned from cache entries resource")
+				}
+
+				let entries: any
+				try {
+					entries = JSON.parse(entriesContent)
+				} catch (e) {
+					console.error("Failed to parse entries content:", JSON.stringify(entriesContent))
+					console.error("Content length:", entriesContent.length)
+					console.error("First 100 chars:", entriesContent.substring(0, 100))
+					throw e
+				}
+				if (entries.entries.length !== 1) {
+					throw new Error(`Expected 1 cache entry, got ${entries.entries.length}`)
+				}
+
+				const entry = entries.entries[0]
+				assertContains(entry.key, "tinc", "Cache entry should contain 'tinc'")
+				console.log(`✅ Found cache entry for ${entry.key}`)
+
+				// Test 5: Test pagination
+				console.log("\n📄 Test 5: Test cache entries pagination...")
+
+				// Add more crates to test pagination
+				const crateNames = ["clap", "anyhow", "thiserror"]
+				let reqId = 10
+				let successCount = 0
+				for (const crateName of crateNames) {
+					const result = await callTool(server, "lookup_crate_docs", { crateName }, reqId++)
+					if (!result.result?.isError) {
+						successCount++
+					}
+				}
+				console.log(`✅ Added ${successCount} more crates to cache`)
+
+				// Test pagination with limit
+				const paginatedContent = await readResource(server, "cache://entries?limit=2&offset=1", 20)
+				const paginatedEntries = JSON.parse(paginatedContent)
+
+				if (paginatedEntries.entries.length !== 2) {
+					throw new Error(
+						`Expected 2 entries with pagination, got ${paginatedEntries.entries.length}`
+					)
+				}
+				console.log("✅ Pagination working correctly")
+
+				// Test 6: SQL query execution
+				console.log("\n🔍 Test 6: SQL query execution...")
+				const sqlContent = await readResource(
+					server,
+					"cache://query?sql=SELECT COUNT(*) as count FROM cache",
+					30
+				)
+				const sqlResult = JSON.parse(sqlContent)
+
+				const expectedCount = 1 + successCount // 1 from tinc + successCount from pagination test
+				if (
+					!Array.isArray(sqlResult) ||
+					sqlResult.length === 0 ||
+					sqlResult[0].count !== expectedCount
+				) {
+					throw new Error(`Expected ${expectedCount} cache entries, got ${sqlResult?.[0]?.count}`)
+				}
+				console.log("✅ SQL query executed successfully")
+
+				// Test 7: Security - reject non-SELECT queries
+				console.log("\n🔒 Test 7: Security - reject non-SELECT queries...")
+				const dangerousResponse = await server.sendRequest({
+					jsonrpc: "2.0",
+					id: 40,
+					method: "resources/read",
+					params: {
+						uri: "cache://query?sql=DELETE FROM cache"
+					}
+				})
+
+				// The server returns a successful response with an error message in the content
+				const dangerousContent = dangerousResponse.result?.contents?.[0]?.text || ""
+				assertContains(
+					dangerousContent,
+					"Only SELECT queries are allowed",
+					"Should reject non-SELECT query"
+				)
+				console.log("✅ Correctly rejected non-SELECT query")
+
+				console.log("\n✅ All resources tests passed")
+			},
+			{ DB_PATH: cacheDbPath }
+		)
+	})
+}
+
+export const runResourcesTests = async (options: TestOptions): Promise<void> => {
+	await testResources(options)
+}
+
+// Main execution
+if (import.meta.main) {
+	const args = process.argv.slice(2)
+
+	if (args.length < 2) {
+		console.error("Usage: bun test/integration/test-resources.ts <executable> <target>")
+		process.exit(1)
+	}
+
+	const [executable, target] = args
+	const options: TestOptions = { executable, target }
+
+	try {
+		await runResourcesTests(options)
+	} catch (error) {
+		console.error(`\n❌ Resources tests failed: ${error}`)
+		process.exit(1)
+	}
+}
diff --git a/test/integration/test-zstd.ts b/test/integration/test-zstd.ts
new file mode 100644
index 0000000..692b093
--- /dev/null
+++ b/test/integration/test-zstd.ts
@@ -0,0 +1,125 @@
+#!/usr/bin/env bun
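+// docs.rs serves rustdoc JSON zstd-compressed, and the server is expected to
+// decompress it transparently. A minimal sketch of that step (illustrative
+// only; it assumes the fzstd library named in the unit-test notes):
+//   import { decompress } from "fzstd"
+//   const bytes = decompress(new Uint8Array(await res.arrayBuffer()))
+//   const json = JSON.parse(new TextDecoder().decode(bytes))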
+import {
+	assertContains,
+	assertSuccess,
+	callTool,
+	type TestOptions,
+	withMCPServer
+} from "./utils.js"
+
+const testZstdDecompression = async (options: TestOptions): Promise<void> => {
+	console.log("\n🗜️ Testing zstd decompression functionality...")
+
+	await withMCPServer(options.executable, async (server) => {
+		// Test 1: Fetch a crate that requires zstd decompression
+		console.log("\n📦 Test 1: Fetch crate documentation with zstd compression...")
+
+		// Using a crate that we know has rustdoc JSON
+		console.log("⏳ Fetching anyhow documentation (this may take a moment)...")
+		const startTime = Date.now()
+		const lookupResponse = await callTool(server, "lookup_crate_docs", {
+			crateName: "anyhow"
+		})
+		const fetchTime = Date.now() - startTime
+
+		assertSuccess(lookupResponse)
+		if (!lookupResponse.result?.content?.[0]?.text) {
+			throw new Error("Failed to fetch crate documentation")
+		}
+
+		const docText = lookupResponse.result.content[0].text
+		console.log(`✅ Successfully fetched and decompressed documentation in ${fetchTime}ms`)
+
+		// Test 2: Verify decompressed content is valid
+		console.log("\n🔍 Test 2: Verify decompressed content...")
+
+		// Check for expected content in anyhow docs
+		const expectedPatterns = ["anyhow", "error", "result", "context"]
+
+		let foundPatterns = 0
+		for (const pattern of expectedPatterns) {
+			if (docText.toLowerCase().includes(pattern)) {
+				foundPatterns++
+			}
+		}
+
+		if (foundPatterns < 2) {
+			throw new Error("Decompressed content doesn't contain expected anyhow documentation patterns")
+		}
+		console.log(
+			`✅ Verified documentation content (found ${foundPatterns}/${expectedPatterns.length} expected patterns)`
+		)
+
+		// Test 3: Fetch item documentation (also uses zstd)
+		console.log("\n📄 Test 3: Fetch specific item documentation...")
+		const itemResponse = await callTool(
+			server,
+			"lookup_item_docs",
+			{
+				crateName: "anyhow",
+				itemPath: "Error"
+			},
+			3
+		)
+
+		assertSuccess(itemResponse)
+		if (!itemResponse.result?.content?.[0]?.text) {
+			throw new Error("Failed to fetch item documentation")
+		}
+
+		const itemText = itemResponse.result.content[0].text
+		assertContains(itemText.toLowerCase(), "error", "Item documentation should contain 'error'")
+		console.log("✅ Successfully fetched and decompressed item documentation")
+
+		// Test 4: Test with a smaller crate to ensure zstd works for various sizes
+		console.log("\n📐 Test 4: Test with smaller crate...")
+		const smallCrateResponse = await callTool(
+			server,
+			"lookup_crate_docs",
+			{
+				crateName: "once_cell"
+			},
+			4
+		)
+
+		assertSuccess(smallCrateResponse)
+		if (!smallCrateResponse.result?.content?.[0]?.text) {
+			throw new Error("Failed to fetch small crate documentation")
+		}
+
+		const smallCrateText = smallCrateResponse.result.content[0].text
+		if (!smallCrateText.includes("once_cell") && !smallCrateText.includes("OnceCell")) {
+			throw new Error("Small crate documentation doesn't contain expected content")
+		}
+		console.log("✅ Zstd decompression works for crates of various sizes")
+
+		console.log("\n✅ All zstd decompression tests passed")
+	})
+}
+
+export const runZstdTests = async (options: TestOptions): Promise<void> => {
+	await testZstdDecompression(options)
+}
+
+// Main execution
+if (import.meta.main) {
+	const args = process.argv.slice(2)
+
+	if (args.length < 2) {
+		console.error("Usage: bun test/integration/test-zstd.ts <executable> <target>")
+		process.exit(1)
+	}
+
+	const [executable, target] = args
+	const options: TestOptions = {
+		executable,
+		target
+	}
+
+	try {
+		await runZstdTests(options)
+	} catch (error) {
+		console.error(`\n❌ Zstd decompression tests failed: ${error}`)
+		process.exit(1)
+	}
+}
diff --git a/test/integration/utils.ts b/test/integration/utils.ts
new file mode 100644
index 0000000..65c754b
--- /dev/null
+++ b/test/integration/utils.ts
@@ -0,0 +1,261 @@
+import { type ChildProcess, spawn } from "node:child_process"
+import { promises as fs } from "node:fs"
+import os from "node:os"
+import path from "node:path"
+
+export type TestOptions = {
+	executable: string
+	target: string
+	isMusl?: boolean
+	isWindows?: boolean
+}
+
+export type MCPResponse = {
+	jsonrpc: string
+	id: number
+	result?: any
+	error?: any
+}
+
+export type MCPServer = {
+	process: ChildProcess
+	sendRequest: (request: any) => Promise<MCPResponse>
+	kill: () => void
+}
+
+/**
+ * Creates an MCP server instance with helper methods
+ */
+export const createMCPServer = (executable: string, env?: Record<string, string>): MCPServer => {
+	const server = spawn(executable, [], {
+		env: { ...process.env, MCP_TEST: "true", ...env },
+		stdio: ["pipe", "pipe", "pipe"]
+	})
+
+	let _errorOutput = ""
+	server.stderr.on("data", (data) => {
+		_errorOutput += data.toString()
+		if (process.env.DEBUG) {
+			console.error("Server error:", data.toString())
+		}
+	})
+
+	const sendRequest = (request: any): Promise<MCPResponse> => {
+		return new Promise((resolve, reject) => {
+			const requestStr = `${JSON.stringify(request)}\n`
+			let response = ""
+			let responseReceived = false
+
+			const dataHandler = (data: Buffer) => {
+				response += data.toString()
+				const lines = response.split("\n")
+
+				for (const line of lines) {
+					if (line.trim()) {
+						try {
+							const parsed = JSON.parse(line)
+							if (parsed.id === request.id) {
+								responseReceived = true
+								server.stdout.off("data", dataHandler)
+								resolve(parsed)
+								return
+							}
+						} catch {
+							// Continue collecting data
+						}
+					}
+				}
+			}
+
+			server.stdout.on("data", dataHandler)
+			server.stdin.write(requestStr)
+
+			// Timeout based on request type
+			const timeout = request.method === "tools/call" ? 30000 : 10000
+			setTimeout(() => {
+				if (!responseReceived) {
+					server.stdout.off("data", dataHandler)
+					reject(new Error(`Timeout waiting for response to ${request.method}`))
+				}
+			}, timeout)
+		})
+	}
+
+	const kill = () => {
+		server.kill("SIGTERM")
+	}
+
+	return {
+		process: server,
+		sendRequest,
+		kill
+	}
+}
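+
+// Example (illustrative): driving the server by hand instead of via the
+// helpers below; the binary path is whatever the runner passes as <executable>.
+//   const server = createMCPServer("./dist/mcp-docsrs")
+//   const res = await server.sendRequest({ jsonrpc: "2.0", id: 1, method: "tools/list", params: {} })
+//   server.kill()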
+
+/**
+ * Initializes an MCP server
+ */
+export const initializeServer = async (server: MCPServer): Promise<void> => {
+	const initRequest = {
+		jsonrpc: "2.0",
+		id: 1,
+		method: "initialize",
+		params: {
+			protocolVersion: "2024-11-05",
+			capabilities: {},
+			clientInfo: {
+				name: "integration-test",
+				version: "1.0.0"
+			}
+		}
+	}
+
+	const response = await server.sendRequest(initRequest)
+	if (!response.result?.protocolVersion) {
+		throw new Error(`Failed to initialize MCP server: ${JSON.stringify(response)}`)
+	}
+}
+
+/**
+ * Runs a test against an initialized MCP server
+ */
+export const withMCPServer = async (
+	executable: string,
+	testFn: (server: MCPServer) => Promise<void>,
+	env?: Record<string, string>
+): Promise<void> => {
+	const server = createMCPServer(executable, { DB_PATH: ":memory:", ...env })
+
+	try {
+		await initializeServer(server)
+		await testFn(server)
+	} finally {
+		server.kill()
+		await new Promise((resolve) => setTimeout(resolve, 1000))
+	}
+}
+
+/**
+ * Creates a temporary directory for testing
+ */
+export const withTempDir = async <T>(
+	prefix: string,
+	testFn: (tempDir: string) => Promise<T>
+): Promise<T> => {
+	const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix))
+
+	try {
+		return await testFn(tempDir)
+	} finally {
+		try {
+			await fs.rm(tempDir, { recursive: true, force: true })
+		} catch {
+			// Ignore cleanup errors
+		}
+	}
+}
+
+/**
+ * Calls an MCP tool
+ */
+export const callTool = (
+	server: MCPServer,
+	toolName: string,
+	args: Record<string, unknown>,
+	requestId = 2
+): Promise<MCPResponse> => {
+	return server.sendRequest({
+		jsonrpc: "2.0",
+		id: requestId,
+		method: "tools/call",
+		params: {
+			name: toolName,
+			arguments: args
+		}
+	})
+}
+
+/**
+ * Lists MCP tools
+ */
+export const listTools = async (server: MCPServer, requestId = 2): Promise<string[]> => {
+	const response = await server.sendRequest({
+		jsonrpc: "2.0",
+		id: requestId,
+		method: "tools/list",
+		params: {}
+	})
+
+	return (response.result?.tools || []).map((t: any) => t.name)
+}
+
+/**
+ * Lists MCP resources
+ */
+export const listResources = async (server: MCPServer, requestId = 2): Promise<any[]> => {
+	const response = await server.sendRequest({
+		jsonrpc: "2.0",
+		id: requestId,
+		method: "resources/list",
+		params: {}
+	})
+
+	return response.result?.resources || []
+}
+
+/**
+ * Reads an MCP resource
+ */
+export const readResource = async (
+	server: MCPServer,
+	uri: string,
+	requestId = 2
+): Promise<string> => {
+	const response = await server.sendRequest({
+		jsonrpc: "2.0",
+		id: requestId,
+		method: "resources/read",
+		params: { uri }
+	})
+
+	return response.result?.contents?.[0]?.text || ""
+}
+
+/**
+ * Asserts that a response contains expected text
+ */
+export const assertContains = (text: string, expected: string, message?: string): void => {
+	if (!text.includes(expected)) {
+		throw new Error(message || `Expected text to contain "${expected}"`)
+	}
+}
+
+/**
+ * Asserts that a response does not contain text
+ */
+export const assertNotContains = (text: string, unexpected: string, message?: string): void => {
+	if (text.includes(unexpected)) {
+		throw new Error(message || `Expected text not to contain "${unexpected}"`)
+	}
+}
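+
+// Example (illustrative): composing the request and assertion helpers above
+// in a test body, as the resources integration test does:
+//   const text = await readResource(server, "cache://stats", 7)
+//   assertContains(text, "totalEntries", "stats should report an entry count")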
+
+/**
+ * Asserts that an error response is received
+ */
+export const assertError = (response: MCPResponse, message?: string): void => {
+	if (!response.error) {
+		throw new Error(message || "Expected an error response")
+	}
+}
+
+/**
+ * Asserts that a successful response is received
+ */
+export const assertSuccess = (response: MCPResponse, message?: string): void => {
+	if (response.error) {
+		throw new Error(message || `Expected success but got error: ${JSON.stringify(response.error)}`)
+	}
+	if (!response.result) {
+		throw new Error(message || "Expected a result in the response")
+	}
+}
diff --git a/test/unit/cache.test.ts b/test/unit/cache.test.ts
new file mode 100644
index 0000000..8b5ea0d
--- /dev/null
+++ b/test/unit/cache.test.ts
@@ -0,0 +1,75 @@
+import { afterEach, beforeEach, describe, expect, it } from "bun:test"
+import { createCache } from "../../src/cache.js"
+
+describe("Cache", () => {
+	let cache: ReturnType<typeof createCache>
+
+	beforeEach(() => {
+		cache = createCache(3) // Small cache for testing
+	})
+
+	afterEach(() => {
+		cache.close()
+	})
+
+	it("should store and retrieve values", () => {
+		const testData = { foo: "bar", count: 42 }
+		cache.set("test-key", testData, 3600000)
+
+		const retrieved = cache.get("test-key")
+		expect(retrieved).toEqual(testData)
+	})
+
+	it("should return null for non-existent keys", () => {
+		const result = cache.get("non-existent")
+		expect(result).toBeNull()
+	})
+
+	it("should expire entries after TTL", async () => {
+		cache.set("expire-test", "value", 100) // 100ms TTL
+
+		// Should exist immediately
+		expect(cache.get("expire-test")).toBe("value")
+
+		// Wait for expiration
+		await new Promise((resolve) => setTimeout(resolve, 150))
+
+		// Should be expired
+		expect(cache.get("expire-test")).toBeNull()
+	})
+
+	it("should enforce max size", () => {
+		cache.set("key1", "value1", 3600000)
+		cache.set("key2", "value2", 3600000)
+		cache.set("key3", "value3", 3600000)
+		cache.set("key4", "value4", 3600000) // Should evict oldest
+
+		// Oldest should be evicted
+		expect(cache.get("key1")).toBeNull()
+
+		// Others should still exist
+		expect(cache.get("key2")).toBe("value2")
+		expect(cache.get("key3")).toBe("value3")
+		expect(cache.get("key4")).toBe("value4")
+	})
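+
+	// Note: key1 is both the oldest insertion and the least recently used, so
+	// this test passes under either FIFO or LRU eviction; it only pins down
+	// that exactly one entry is dropped once max size is exceeded.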
+
+	it("should clear all entries", () => {
+		cache.set("key1", "value1", 3600000)
+		cache.set("key2", "value2", 3600000)
+
+		cache.clear()
+
+		expect(cache.get("key1")).toBeNull()
+		expect(cache.get("key2")).toBeNull()
+	})
+
+	it("should delete specific entries", () => {
+		cache.set("key1", "value1", 3600000)
+		cache.set("key2", "value2", 3600000)
+
+		cache.delete("key1")
+
+		expect(cache.get("key1")).toBeNull()
+		expect(cache.get("key2")).toBe("value2")
+	})
+})
diff --git a/test/unit/docs-fetcher.unit.test.ts b/test/unit/docs-fetcher.unit.test.ts
new file mode 100644
index 0000000..449dede
--- /dev/null
+++ b/test/unit/docs-fetcher.unit.test.ts
@@ -0,0 +1,187 @@
+import { afterEach, beforeEach, describe, expect, it } from "bun:test"
+import { createDocsFetcher } from "../../src/docs-fetcher"
+import { NetworkError } from "../../src/errors"
+import { mockFetch, mockFetchResponses, mockRustdocJson, resetMocks } from "./mocks"
+
+describe("DocsFetcher (Unit Tests)", () => {
+	let fetcher: ReturnType<typeof createDocsFetcher>
+	let originalFetch: typeof global.fetch
+
+	beforeEach(() => {
+		// Save original fetch
+		originalFetch = global.fetch
+		// Mock the global fetch
+		global.fetch = mockFetch as any
+
+		resetMocks()
+
+		// Create fetcher with in-memory cache
+		fetcher = createDocsFetcher({
+			cacheTtl: 3600000,
+			maxCacheSize: 10
+			// No dbPath means in-memory cache
+		})
+	})
+
+	afterEach(() => {
+		// Restore original fetch
+		global.fetch = originalFetch
+		// Close the fetcher
+		fetcher.close()
+	})
+
+	describe("URL construction", () => {
+		it("should construct correct URL with defaults", async () => {
+			const url = "https://docs.rs/crate/tinc/latest/json"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			await fetcher.fetchCrateJson("tinc")
+			expect(mockFetch).toHaveBeenCalledWith(url, expect.any(Object))
+		})
+
+		it("should construct URL with version", async () => {
+			const url = "https://docs.rs/crate/tinc/0.1.6/json"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			await fetcher.fetchCrateJson("tinc", "0.1.6")
+			expect(mockFetch).toHaveBeenCalledWith(url, expect.any(Object))
+		})
+
+		it("should construct URL with target", async () => {
+			const url = "https://docs.rs/crate/tinc/latest/wasm32-unknown-unknown/json"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			await fetcher.fetchCrateJson("tinc", undefined, "wasm32-unknown-unknown")
+			expect(mockFetch).toHaveBeenCalledWith(url, expect.any(Object))
+		})
+
+		it("should construct URL with all parameters", async () => {
+			const url = "https://docs.rs/crate/tinc/0.1.6/wasm32-unknown-unknown/json/30"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			await fetcher.fetchCrateJson("tinc", "0.1.6", "wasm32-unknown-unknown", 30)
+			expect(mockFetch).toHaveBeenCalledWith(url, expect.any(Object))
+		})
+	})
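+
+	// Endpoint pattern exercised above (reconstructed from these expectations,
+	// not from an official docs.rs spec):
+	//   https://docs.rs/crate/<name>/<version|latest>[/<target>]/json[/<format_version>]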
+
+	describe("fetchCrateJson", () => {
+		it("should fetch and parse JSON response", async () => {
+			const url = "https://docs.rs/crate/test_crate/latest/json"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			const result = await fetcher.fetchCrateJson("test_crate")
+
+			expect(result.fromCache).toBe(false)
+			expect(result.data).toEqual(mockRustdocJson)
+			expect(mockFetch).toHaveBeenCalledWith(url, expect.any(Object))
+		})
+
+		it("should return cached data on second fetch", async () => {
+			const url = "https://docs.rs/crate/test_crate/latest/json"
+			mockFetchResponses.set(url, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: mockRustdocJson
+			})
+
+			// First fetch
+			await fetcher.fetchCrateJson("test_crate")
+
+			// Second fetch should hit cache
+			const result = await fetcher.fetchCrateJson("test_crate")
+
+			expect(result.fromCache).toBe(true)
+			expect(result.data).toEqual(mockRustdocJson)
+			expect(mockFetch).toHaveBeenCalledTimes(1) // Only called once
+		})
+
+		it("should handle 404 errors", async () => {
+			const url = "https://docs.rs/crate/nonexistent/latest/json"
+			mockFetchResponses.set(url, {
+				ok: false,
+				status: 404,
+				statusText: "Not Found"
+			})
+
+			await expect(fetcher.fetchCrateJson("nonexistent")).rejects.toThrow(
+				"Crate 'nonexistent' not found"
+			)
+		})
+
+		it("should handle network errors", async () => {
+			const url = "https://docs.rs/crate/test_crate/latest/json"
+			mockFetchResponses.set(url, {
+				error: new NetworkError(url, undefined, undefined, "Network error")
+			})
+
+			await expect(fetcher.fetchCrateJson("test_crate")).rejects.toThrow(NetworkError)
+		})
+
+		// Note: zstd decompression is tested in integration tests (test/integration/test-zstd.ts)
+		// because it requires the actual fzstd library and real compressed data from docs.rs
+
+		it("should respect abort signal", async () => {
+			const url = "https://docs.rs/crate/test_crate/latest/json"
+
+			// Create a native AbortError
+			const abortError = new Error("The operation was aborted")
+			abortError.name = "AbortError"
+
+			mockFetchResponses.set(url, {
+				error: abortError
+			})
+
+			await expect(fetcher.fetchCrateJson("test_crate")).rejects.toThrow()
+		})
+
+		it("should handle different versions", async () => {
+			const url1 = "https://docs.rs/crate/test_crate/1.0.0/json"
+			const url2 = "https://docs.rs/crate/test_crate/2.0.0/json"
+
+			mockFetchResponses.set(url1, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: { ...mockRustdocJson, version: "1.0.0" }
+			})
+
+			mockFetchResponses.set(url2, {
+				ok: true,
+				status: 200,
+				headers: { "content-type": "application/json" },
+				json: { ...mockRustdocJson, version: "2.0.0" }
+			})
+
+			const result1 = await fetcher.fetchCrateJson("test_crate", "1.0.0")
+			const result2 = await fetcher.fetchCrateJson("test_crate", "2.0.0")
+
+			expect(result1.data.version).toBe("1.0.0")
+			expect(result2.data.version).toBe("2.0.0")
+			expect(mockFetch).toHaveBeenCalledTimes(2)
+		})
+	})
+})
diff --git a/test/unit/mocks/index.ts b/test/unit/mocks/index.ts
new file mode 100644
index 0000000..ea02022
--- /dev/null
+++ b/test/unit/mocks/index.ts
@@ -0,0 +1,169 @@
+import { vi } from "bun:test"
+
+/**
+ * Mock fetch responses for testing
+ */
+export const mockFetchResponses = new Map<string, any>()
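+// Each entry is a loose bag of overrides consumed by mockFetch below:
+// ok, status, statusText, headers (or contentType), json, text, arrayBuffer,
+// and error, which is thrown instead of producing a response.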
"OK", + headers: new Headers( + mockResponse.headers || { + "content-type": mockResponse.contentType || "application/json" + } + ), + json: async () => mockResponse.json, + text: async () => mockResponse.text || JSON.stringify(mockResponse.json), + arrayBuffer: async () => mockResponse.arrayBuffer || new ArrayBuffer(0) + } as Response + } + + // Default 404 response + return { + ok: false, + status: 404, + statusText: "Not Found", + headers: new Headers(), + json: () => { + throw new Error("Not Found") + }, + text: async () => "Not Found" + } as Response +}) + +/** + * Reset all mocks + */ +export const resetMocks = () => { + mockFetchResponses.clear() + mockFetch.mockClear() +} + +/** + * Mock SQLite cache for testing + */ +export const createMockCache = () => { + const store = new Map() + + return { + get: (key: string): T | null => { + const entry = store.get(key) + if (!entry) return null + if (entry.expires < Date.now()) { + store.delete(key) + return null + } + return entry.value + }, + set: (key: string, value: T, ttl: number) => { + store.set(key, { + value, + expires: Date.now() + ttl + }) + }, + delete: (key: string) => { + store.delete(key) + }, + clear: () => { + store.clear() + }, + close: () => { + // No-op for mock + } + } +} + +/** + * Mock rustdoc JSON response + */ +export const mockRustdocJson = { + format_version: 30, + root: "0:0", + crate: { + name: "test_crate", + version: "1.0.0" + }, + index: { + "0:0": { + name: "test_crate", + kind: "module", + inner: { + module: { + items: ["0:1", "0:2"] + } + } + }, + "0:1": { + name: "TestStruct", + kind: "struct", + inner: { + struct: { + kind: "plain", + fields: [] + } + } + }, + "0:2": { + name: "test_function", + kind: "function", + inner: { + function: { + decl: { + inputs: [], + output: null + } + } + } + } + }, + paths: { + "0:0": { + crate_id: 0, + path: ["test_crate"] + }, + "0:1": { + crate_id: 0, + path: ["test_crate", "TestStruct"] + }, + "0:2": { + crate_id: 0, + path: ["test_crate", "test_function"] + } + } +} + +/** + * Mock crates.io search response + */ +export const mockCratesSearchResponse = (query: string, crates: any[] = []) => ({ + crates: + crates.length > 0 + ? 
+
+/**
+ * Mock rustdoc JSON response
+ */
+export const mockRustdocJson = {
+	format_version: 30,
+	root: "0:0",
+	crate: {
+		name: "test_crate",
+		version: "1.0.0"
+	},
+	index: {
+		"0:0": {
+			name: "test_crate",
+			kind: "module",
+			inner: {
+				module: {
+					items: ["0:1", "0:2"]
+				}
+			}
+		},
+		"0:1": {
+			name: "TestStruct",
+			kind: "struct",
+			inner: {
+				struct: {
+					kind: "plain",
+					fields: []
+				}
+			}
+		},
+		"0:2": {
+			name: "test_function",
+			kind: "function",
+			inner: {
+				function: {
+					decl: {
+						inputs: [],
+						output: null
+					}
+				}
+			}
+		}
+	},
+	paths: {
+		"0:0": {
+			crate_id: 0,
+			path: ["test_crate"]
+		},
+		"0:1": {
+			crate_id: 0,
+			path: ["test_crate", "TestStruct"]
+		},
+		"0:2": {
+			crate_id: 0,
+			path: ["test_crate", "test_function"]
+		}
+	}
+}
+
+/**
+ * Mock crates.io search response
+ */
+export const mockCratesSearchResponse = (query: string, crates: any[] = []) => ({
+	crates:
+		crates.length > 0
+			? crates
+			: [
+					{
+						name: query,
+						description: `A mock crate matching ${query}`,
+						downloads: 1000000,
+						recent_downloads: 50000,
+						max_stable_version: "1.0.0"
+					}
+				],
+	meta: {
+		total: crates.length || 1
+	}
+})
diff --git a/test/unit/rustdoc-parser.test.ts b/test/unit/rustdoc-parser.test.ts
new file mode 100644
index 0000000..e726e50
--- /dev/null
+++ b/test/unit/rustdoc-parser.test.ts
@@ -0,0 +1,148 @@
+import { describe, expect, it } from "bun:test"
+import { RustdocParseError } from "../../src/errors.js"
+import { findItem, parseCrateInfo } from "../../src/rustdoc-parser.js"
+import type { RustdocJson } from "../../src/types.js"
+
+describe("RustdocParser", () => {
+	// Sample rustdoc JSON structure for testing
+	const sampleJson: RustdocJson = {
+		format_version: 30,
+		root: "0:0",
+		crate_version: "1.0.0",
+		includes_private: false,
+		index: {
+			"0:0": {
+				id: "0:0",
+				crate_id: 0,
+				name: "test_crate",
+				visibility: "public" as const,
+				docs: "This is a test crate for demonstration purposes.",
+				inner: {
+					module: {
+						is_crate: true,
+						items: ["0:1", "0:2"]
+					}
+				}
+			},
+			"0:1": {
+				id: "0:1",
+				crate_id: 0,
+				name: "MyStruct",
+				visibility: "public" as const,
+				docs: "A sample struct",
+				inner: {
+					struct: {
+						struct_type: "plain",
+						fields: []
+					}
+				}
+			},
+			"0:2": {
+				id: "0:2",
+				crate_id: 0,
+				name: "my_function",
+				visibility: "public" as const,
+				docs: "A sample function that does something",
+				inner: {
+					function: {
+						decl: {},
+						header: {
+							const: false,
+							async: false,
+							unsafe: false
+						}
+					}
+				}
+			}
+		},
+		paths: {
+			"0:1": {
+				crate_id: 0,
+				path: ["test_crate", "MyStruct"],
+				kind: "struct"
+			},
+			"0:2": {
+				crate_id: 0,
+				path: ["test_crate", "my_function"],
+				kind: "function"
+			}
+		},
+		external_crates: {}
+	}
+
+	describe("parseCrateInfo", () => {
+		it("should parse crate information correctly", () => {
+			const result = parseCrateInfo(sampleJson)
+
+			expect(result).toContain("# Crate: test_crate v1.0.0")
+			expect(result).toContain("## Documentation")
+			expect(result).toContain("This is a test crate for demonstration purposes.")
+			expect(result).toContain("## Structs")
+			expect(result).toContain("- **MyStruct**: A sample struct")
+			expect(result).toContain("## Functions")
+			expect(result).toContain("- **my_function**: A sample function that does something")
+		})
+
+		it("should handle missing root item", () => {
+			const emptyJson: RustdocJson = {
+				format_version: 30,
+				root: "invalid",
+				includes_private: false,
+				index: {},
+				paths: {},
+				external_crates: {}
+			}
+
+			expect(() => parseCrateInfo(emptyJson)).toThrow(RustdocParseError)
+			expect(() => parseCrateInfo(emptyJson)).toThrow("Root item 'invalid' not found in index")
+		})
+	})
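+
+	// Taken together, the assertions above imply markdown output shaped
+	// roughly like this (reconstructed from the expectations, not the parser):
+	//   # Crate: test_crate v1.0.0
+	//   ## Documentation
+	//   This is a test crate for demonstration purposes.
+	//   ## Structs
+	//   - **MyStruct**: A sample struct
+	//   ## Functions
+	//   - **my_function**: A sample function that does something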
+
+	describe("findItem", () => {
+		it("should find items by path", () => {
+			const result = findItem(sampleJson, "MyStruct")
+
+			expect(result).toContain("# MyStruct")
+			expect(result).toContain("**Type:** struct")
+			expect(result).toContain("A sample struct")
+		})
+
+		it("should find items by partial path", () => {
+			const result = findItem(sampleJson, "my_function")
+
+			expect(result).toContain("# my_function")
+			expect(result).toContain("**Type:** function")
+			expect(result).toContain("A sample function that does something")
+		})
+
+		it("should return null for non-existent items", () => {
+			const result = findItem(sampleJson, "NonExistent")
+			expect(result).toBeNull()
+		})
+
+		it("should handle struct details", () => {
+			const structJson = {
+				...sampleJson,
+				index: {
+					...sampleJson.index,
+					"0:1": {
+						...sampleJson.index["0:1"],
+						inner: {
+							struct: {
+								struct_type: "tuple" as const,
+								fields: ["0:3", "0:4"],
+								impls: ["0:5"]
+							}
+						}
+					}
+				}
+			}
+
+			const result = findItem(structJson, "MyStruct")
+
+			expect(result).toContain("**Struct Type:** tuple")
+			expect(result).toContain("**Fields:** (field IDs available")
+			expect(result).toContain("**Implementations:** 1 impl block(s)")
+		})
+	})
+})
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 0000000..cd9f4c8
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,38 @@
+{
+	"compilerOptions": {
+		"target": "ES2024",
+		"module": "Preserve",
+		"moduleResolution": "bundler",
+		"lib": [
+			"ES2024"
+		],
+		"types": [
+			"@types/bun"
+		],
+		"esModuleInterop": true,
+		"allowImportingTsExtensions": true,
+		"verbatimModuleSyntax": true,
+		"noEmit": true,
+		"allowSyntheticDefaultImports": true,
+		"strict": true,
+		"skipLibCheck": true,
+		"forceConsistentCasingInFileNames": true,
+		"resolveJsonModule": true,
+		"outDir": "./dist",
+		"rootDir": "./src",
+		"declaration": true,
+		"declarationMap": true,
+		"sourceMap": true,
+		"noUnusedLocals": true,
+		"noUnusedParameters": true,
+		"noImplicitReturns": true,
+		"noFallthroughCasesInSwitch": true
+	},
+	"include": [
+		"src/**/*"
+	],
+	"exclude": [
+		"node_modules",
+		"dist"
+	]
+}
\ No newline at end of file