# ci: finalize test for the documentation code (#226)

# CI workflow: builds, lints, and tests every package in the monorepo on
# pull requests targeting main.
name: CI

on:
  pull_request:
    branches: [ main ]

# Cancel superseded runs for the same ref so only the latest push is tested.
concurrency:
  group: ci-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
build-python-packages:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- name: hindsight-all
path: hindsight
- name: hindsight-api
path: hindsight-api
- name: hindsight-client
path: hindsight-clients/python
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Build ${{ matrix.name }}
working-directory: ./${{ matrix.path }}
run: uv build
build-api-python-versions:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.11', '3.12', '3.13']
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Build hindsight-api
working-directory: ./hindsight-api
run: uv build
build-typescript-client:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: package-lock.json
- name: Install dependencies
run: npm ci --workspace=hindsight-clients/typescript
- name: Build TypeScript client
run: npm run build --workspace=hindsight-clients/typescript
build-control-plane:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: package-lock.json
- name: Install SDK dependencies
run: npm ci --workspace=hindsight-clients/typescript
- name: Build SDK
run: npm run build --workspace=hindsight-clients/typescript
# Install control plane deps and fix hoisted lightningcss binary
# lightningcss gets hoisted to root node_modules, so we need to reinstall it there
- name: Install Control Plane dependencies
run: |
npm install --workspace=hindsight-control-plane
rm -rf node_modules/lightningcss node_modules/@tailwindcss
npm install lightningcss @tailwindcss/postcss @tailwindcss/node
- name: Build Control Plane
run: npm run build --workspace=hindsight-control-plane
- name: Verify standalone build
run: |
test -f hindsight-control-plane/standalone/server.js || exit 1
test -d hindsight-control-plane/standalone/node_modules || exit 1
node hindsight-control-plane/bin/cli.js --help
- name: Smoke test - verify server starts
run: |
cd hindsight-control-plane
node bin/cli.js --port 9999 &
SERVER_PID=$!
sleep 5
if curl -sf http://localhost:9999 > /dev/null 2>&1; then
echo "Server started successfully"
kill $SERVER_PID 2>/dev/null || true
exit 0
else
echo "Server failed to respond"
kill $SERVER_PID 2>/dev/null || true
exit 1
fi
build-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: package-lock.json
- name: Install dependencies
run: npm ci --workspace=hindsight-docs
- name: Build docs
run: npm run build --workspace=hindsight-docs
build-rust-cli:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
hindsight-cli/target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Build CLI
working-directory: hindsight-cli
run: cargo build --release
- name: Upload CLI artifact
uses: actions/upload-artifact@v4
with:
name: hindsight-cli
path: hindsight-cli/target/release/hindsight
retention-days: 1
lint-helm-chart:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Helm
uses: azure/setup-helm@v4
with:
version: 'latest'
- name: Lint Helm chart
run: helm lint helm/hindsight
build-docker-images:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- target: api-only
name: api
- target: cp-only
name: control-plane
- target: standalone
name: standalone
steps:
- uses: actions/checkout@v4
- name: Free Disk Space
uses: jlumbroso/free-disk-space@main
with:
tool-cache: true
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build ${{ matrix.name }} image
uses: docker/build-push-action@v6
with:
context: .
file: docker/standalone/Dockerfile
target: ${{ matrix.target }}
push: false
load: false
# TODO: Re-enable smoke test when disk space issue is resolved
# - name: Smoke test - verify container starts
# env:
# GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
# run: ./scripts/docker-smoke-test.sh "hindsight-${{ matrix.name }}:test" "${{ matrix.target }}"
test-api:
runs-on: ubuntu-latest
env:
HINDSIGHT_API_LLM_PROVIDER: groq
HINDSIGHT_API_LLM_API_KEY: ${{ secrets.GROQ_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
HINDSIGHT_API_LLM_MODEL: openai/gpt-oss-20b
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Prefer CPU-only PyTorch in CI (but keep PyPI for everything else)
UV_INDEX: pytorch=https://download.pytorch.org/whl/cpu
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Build API
working-directory: ./hindsight-api
run: uv build
- name: Install dependencies
working-directory: ./hindsight-api
run: uv sync --extra test --no-install-project --index-strategy unsafe-best-match
- name: Cache HuggingFace models
uses: actions/cache@v4
with:
path: ~/.cache/huggingface
key: ${{ runner.os }}-huggingface-${{ hashFiles('hindsight-api/pyproject.toml') }}
restore-keys: |
${{ runner.os }}-huggingface-
- name: Pre-download models
working-directory: ./hindsight-api
run: |
uv run python -c "
from sentence_transformers import SentenceTransformer, CrossEncoder
print('Downloading embedding model...')
SentenceTransformer('BAAI/bge-small-en-v1.5')
print('Downloading cross-encoder model...')
CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
print('Models downloaded successfully')
"
- name: Run tests
working-directory: ./hindsight-api
run: uv run pytest tests -v
test-python-client:
runs-on: ubuntu-latest
env:
HINDSIGHT_API_LLM_PROVIDER: groq
HINDSIGHT_API_LLM_API_KEY: ${{ secrets.GROQ_API_KEY }}
HINDSIGHT_API_LLM_MODEL: openai/gpt-oss-20b
HINDSIGHT_API_URL: http://localhost:8888
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Prefer CPU-only PyTorch in CI (but keep PyPI for everything else)
UV_INDEX: pytorch=https://download.pytorch.org/whl/cpu
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Build API
working-directory: ./hindsight-api
run: uv build
- name: Build Python client
working-directory: ./hindsight-clients/python
run: uv build
- name: Install client test dependencies
working-directory: ./hindsight-clients/python
run: uv sync --extra test --index-strategy unsafe-best-match
- name: Install API dependencies
working-directory: ./hindsight-api
run: uv sync --no-install-project --index-strategy unsafe-best-match
- name: Create .env file
run: |
cat > .env << EOF
HINDSIGHT_API_LLM_PROVIDER=${{ env.HINDSIGHT_API_LLM_PROVIDER }}
HINDSIGHT_API_LLM_API_KEY=${{ env.HINDSIGHT_API_LLM_API_KEY }}
HINDSIGHT_API_LLM_MODEL=${{ env.HINDSIGHT_API_LLM_MODEL }}
EOF
- name: Start API server
run: |
./scripts/dev/start-api.sh > /tmp/api-server.log 2>&1 &
echo "Waiting for API server to be ready..."
for i in {1..60}; do
if curl -sf http://localhost:8888/health > /dev/null 2>&1; then
echo "API server is ready after ${i}s"
break
fi
if [ $i -eq 60 ]; then
echo "API server failed to start after 60s"
cat /tmp/api-server.log
exit 1
fi
sleep 1
done
- name: Run Python client tests
working-directory: ./hindsight-clients/python
run: uv run pytest tests -v
- name: Show API server logs
if: always()
run: |
echo "=== API Server Logs ==="
cat /tmp/api-server.log || echo "No API server log found"
test-typescript-client:
runs-on: ubuntu-latest
env:
HINDSIGHT_API_LLM_PROVIDER: groq
HINDSIGHT_API_LLM_API_KEY: ${{ secrets.GROQ_API_KEY }}
HINDSIGHT_API_LLM_MODEL: openai/gpt-oss-20b
HINDSIGHT_API_URL: http://localhost:8888
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Prefer CPU-only PyTorch in CI (but keep PyPI for everything else)
UV_INDEX: pytorch=https://download.pytorch.org/whl/cpu
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Build API
working-directory: ./hindsight-api
run: uv build
- name: Install API dependencies
working-directory: ./hindsight-api
run: uv sync --no-install-project --index-strategy unsafe-best-match
- name: Install TypeScript client dependencies
working-directory: ./hindsight-clients/typescript
run: npm ci
- name: Build TypeScript client
working-directory: ./hindsight-clients/typescript
run: npm run build
- name: Create .env file
run: |
cat > .env << EOF
HINDSIGHT_API_LLM_PROVIDER=${{ env.HINDSIGHT_API_LLM_PROVIDER }}
HINDSIGHT_API_LLM_API_KEY=${{ env.HINDSIGHT_API_LLM_API_KEY }}
HINDSIGHT_API_LLM_MODEL=${{ env.HINDSIGHT_API_LLM_MODEL }}
EOF
- name: Start API server
run: |
./scripts/dev/start-api.sh > /tmp/api-server.log 2>&1 &
echo "Waiting for API server to be ready..."
for i in {1..60}; do
if curl -sf http://localhost:8888/health > /dev/null 2>&1; then
echo "API server is ready after ${i}s"
break
fi
if [ $i -eq 60 ]; then
echo "API server failed to start after 60s"
cat /tmp/api-server.log
exit 1
fi
sleep 1
done
- name: Run TypeScript client tests
working-directory: ./hindsight-clients/typescript
run: npm test
- name: Show API server logs
if: always()
run: |
echo "=== API Server Logs ==="
cat /tmp/api-server.log || echo "No API server log found"
test-rust-client:
runs-on: ubuntu-latest
env:
HINDSIGHT_API_LLM_PROVIDER: groq
HINDSIGHT_API_LLM_API_KEY: ${{ secrets.GROQ_API_KEY }}
HINDSIGHT_API_LLM_MODEL: openai/gpt-oss-20b
HINDSIGHT_API_URL: http://localhost:8888
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Prefer CPU-only PyTorch in CI (but keep PyPI for everything else)
UV_INDEX: pytorch=https://download.pytorch.org/whl/cpu
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
hindsight-clients/rust/target
key: ${{ runner.os }}-cargo-client-${{ hashFiles('hindsight-clients/rust/Cargo.lock') }}
- name: Build API
working-directory: ./hindsight-api
run: uv build
- name: Install API dependencies
working-directory: ./hindsight-api
run: uv sync --no-install-project --index-strategy unsafe-best-match
- name: Create .env file
run: |
cat > .env << EOF
HINDSIGHT_API_LLM_PROVIDER=${{ env.HINDSIGHT_API_LLM_PROVIDER }}
HINDSIGHT_API_LLM_API_KEY=${{ env.HINDSIGHT_API_LLM_API_KEY }}
HINDSIGHT_API_LLM_MODEL=${{ env.HINDSIGHT_API_LLM_MODEL }}
EOF
- name: Start API server
run: |
./scripts/dev/start-api.sh > /tmp/api-server.log 2>&1 &
echo "Waiting for API server to be ready..."
for i in {1..60}; do
if curl -sf http://localhost:8888/health > /dev/null 2>&1; then
echo "API server is ready after ${i}s"
break
fi
if [ $i -eq 60 ]; then
echo "API server failed to start after 60s"
cat /tmp/api-server.log
exit 1
fi
sleep 1
done
- name: Run Rust client tests
working-directory: ./hindsight-clients/rust
run: cargo test --lib
- name: Show API server logs
if: always()
run: |
echo "=== API Server Logs ==="
cat /tmp/api-server.log || echo "No API server log found"
test-litellm-integration:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Build litellm integration
working-directory: ./hindsight-integrations/litellm
run: uv build
- name: Install dependencies
working-directory: ./hindsight-integrations/litellm
run: uv sync --extra dev
- name: Run tests
working-directory: ./hindsight-integrations/litellm
run: uv run pytest tests -v
test-doc-examples:
runs-on: ubuntu-latest
needs: build-rust-cli
env:
HINDSIGHT_API_LLM_PROVIDER: groq
HINDSIGHT_API_LLM_API_KEY: ${{ secrets.GROQ_API_KEY }}
HINDSIGHT_API_LLM_MODEL: openai/gpt-oss-20b
HINDSIGHT_API_URL: http://localhost:8888
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
UV_INDEX: pytorch=https://download.pytorch.org/whl/cpu
steps:
- uses: actions/checkout@v4
- name: Download CLI artifact
uses: actions/download-artifact@v4
with:
name: hindsight-cli
path: /usr/local/bin
- name: Make CLI executable
run: chmod +x /usr/local/bin/hindsight
- name: Install uv
uses: astral-sh/setup-uv@v5
with:
enable-cache: true
prune-cache: false
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version-file: ".python-version"
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: package-lock.json
- name: Build and install API
working-directory: ./hindsight-api
run: |
uv build
uv sync --no-install-project --index-strategy unsafe-best-match
- name: Install Python client dependencies
working-directory: ./hindsight-clients/python
run: uv sync --extra test --index-strategy unsafe-best-match
- name: Install TypeScript client
run: |
npm ci --workspace=hindsight-clients/typescript
npm run build --workspace=hindsight-clients/typescript
- name: Create .env file
run: |
cat > .env << EOF
HINDSIGHT_API_LLM_PROVIDER=${{ env.HINDSIGHT_API_LLM_PROVIDER }}
HINDSIGHT_API_LLM_API_KEY=${{ env.HINDSIGHT_API_LLM_API_KEY }}
HINDSIGHT_API_LLM_MODEL=${{ env.HINDSIGHT_API_LLM_MODEL }}
EOF
- name: Start API server
run: |
./scripts/dev/start-api.sh > /tmp/api-server.log 2>&1 &
echo "Waiting for API server to be ready..."
for i in {1..60}; do
if curl -sf http://localhost:8888/health > /dev/null 2>&1; then
echo "API server is ready after ${i}s"
break
fi
if [ $i -eq 60 ]; then
echo "API server failed to start after 60s"
cat /tmp/api-server.log
exit 1
fi
sleep 1
done
- name: Run Python doc examples
working-directory: ./hindsight-clients/python
run: |
for f in ../../hindsight-docs/examples/api/*.py; do
echo "Running $f..."
uv run python "$f"
done
- name: Run Node.js doc examples
run: |
for f in hindsight-docs/examples/api/*.mjs; do
echo "Running $f..."
node "$f"
done
- name: Configure CLI
run: hindsight configure --api-url http://localhost:8888
- name: Run CLI doc examples
run: |
for f in hindsight-docs/examples/api/*.sh; do
echo "Running $f..."
bash "$f"
done
- name: Show API server logs
if: always()
run: |
echo "=== API Server Logs ==="
cat /tmp/api-server.log || echo "No API server log found"