fix(registry): add token into header when the registry is private #24

name: Validate Changesets
on:
pull_request:
paths:
- '.changeset/*.md'
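# One validation run per PR: a newer push cancels any in-flight run for the same pull request.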
concurrency:
group: validate-changesets-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
validate-changesets:
runs-on: ubuntu-latest
permissions:
contents: read
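# models: read lets the default GITHUB_TOKEN call GitHub Models from the llm CLI step below.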
models: read
pull-requests: write
steps:
- name: Checkout
uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
with:
fetch-depth: 0
- name: Restore response cache
uses: actions/cache/restore@6f8efc29b200d32929f49075959781ed54ec270c # v3.5.0
id: cache-restore
with:
path: /tmp/llm_cache
key: llm-validation-pr-${{ github.event.pull_request.number }}-${{ github.run_id }}
restore-keys: |
llm-validation-pr-${{ github.event.pull_request.number }}-
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.13.7'
- name: Install llm CLI
run: |
python -m pip install --upgrade pip
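# llm-github-models is an llm plugin; installing it pulls in the llm CLI and registers the github/* models.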
pip install "llm-github-models"
- name: Collect and validate changesets
id: validate
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "Starting changeset validation"
# Find changed changeset files
echo "Looking for changed files between ${{ github.event.pull_request.base.sha }} and ${{ github.sha }}"
git diff --name-only --diff-filter=AM \
"${{ github.event.pull_request.base.sha }}...${{ github.sha }}" \
-- '.changeset/*.md' | grep -v 'README.md' > changeset_files.txt || true
echo "Found changeset files:"
cat changeset_files.txt || true
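# -s is true only for a non-empty file; skip the rest of the step when the PR touches no changesets.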
if [ ! -s changeset_files.txt ]; then
echo "::notice::No changeset files to validate"
echo "has_changesets=false" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "has_changesets=true" >> "$GITHUB_OUTPUT"
echo "::notice::Found $(wc -l < changeset_files.txt) changeset file(s) to validate"
# Collect changeset contents using XML-style tags for better AI parsing
changesets=""
while IFS= read -r file; do
if [ -f "$file" ]; then
echo "Reading changeset file: $file"
# Use printf for more reliable string building
changesets=$(printf "%s<changeset file=\"%s\">\n%s\n</changeset>\n\n" "$changesets" "$file" "$(cat "$file")")
else
echo "::warning::Changeset file not found: $file"
fi
done < changeset_files.txt
echo "Collected changesets content (first 500 chars):"
echo "$changesets" | head -c 500
# Read the validation prompt from file
prompt_template=$(cat .github/workflows/changeset-validation-prompt.txt)
# Create the full prompt with changesets
prompt="${prompt_template}
$changesets"
# Generate cache key from prompt content
cache_dir="/tmp/llm_cache"
mkdir -p "$cache_dir"
cache_key=$(echo "$prompt" | sha256sum | cut -d' ' -f1)
cache_file="${cache_dir}/${cache_key}.json"
echo "Cache key (prompt hash): $cache_key"
# Check for cached response
need_llm_call=false
if [ -f "$cache_file" ]; then
echo "::notice::Cache hit! Using cached validation response"
response=$(cat "$cache_file")
else
echo "Cache miss - calling LLM API"
need_llm_call=true
fi
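# JSON schema passed to llm via --schema so the model returns structured, machine-parseable results.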
schema='{
"type": "object",
"properties": {
"overall_valid": {
"type": "boolean",
"description": "Whether all changesets pass validation"
},
"files": {
"type": "array",
"items": {
"type": "object",
"properties": {
"file": {
"type": "string",
"description": "Path to the changeset file"
},
"valid": {
"type": "boolean",
"description": "Whether this specific file passes validation"
},
"issues": {
"type": "array",
"items": {
"type": "object",
"properties": {
"quote": {"type": "string", "description": "The problematic text from the changeset"},
"errors": {
"type": "array",
"items": {"type": "string"},
"description": "Error messages for this quoted text"
},
"suggestions": {
"type": "array",
"items": {"type": "string"},
"description": "Suggestions for this quoted text"
}
},
"required": ["quote", "errors", "suggestions"],
"additionalProperties": false
},
"description": "Issues grouped by quoted text"
},
"general_errors": {
"type": "array",
"items": {"type": "string"},
"description": "General errors not tied to specific text"
},
"general_suggestions": {
"type": "array",
"items": {"type": "string"},
"description": "General suggestions not tied to specific text"
}
},
"required": ["file", "valid", "issues", "general_errors", "general_suggestions"],
"additionalProperties": false
},
"description": "Per-file validation results"
}
},
"required": ["overall_valid", "files"],
"additionalProperties": false
}'
# Only call LLM if we don't have a cached response
if [ "$need_llm_call" = "true" ]; then
echo "Running llm CLI"
response=$(echo "$prompt" | llm prompt -m github/gpt-4o --schema "$schema" --no-stream 2>&1) || {
echo "::error::Failed to run llm CLI. Exit code: $?"
echo "Error output: $response"
response='{"overall_valid": false, "files": [{"file": "unknown", "valid": false, "errors": ["Failed to get AI response"], "suggestions": []}]}'
}
# Save successful response to cache
if echo "$response" | jq -e '.overall_valid != null' > /dev/null 2>&1; then
echo "$response" > "$cache_file"
echo "Response cached for future use"
fi
fi
echo "LLM response received (first 500 chars):"
echo "$response" | head -c 500
echo ""
# Save response for parsing
echo "$response" > validation_response.json
echo "Response saved to validation_response.json"
# Extract validation status - check both overall_valid AND actual errors
has_errors=false
# Check if any file has issues or errors
if echo "$response" | jq -e '.files[]? | select((.issues // [] | length > 0) or (.general_errors // [] | length > 0))' > /dev/null 2>&1; then
has_errors=true
fi
# Validation passes only if overall_valid is true AND there are no errors
if echo "$response" | jq -e '.overall_valid == true' > /dev/null 2>&1 && [ "$has_errors" = "false" ]; then
echo "::notice::Validation passed"
echo "valid=true" >> "$GITHUB_OUTPUT"
else
echo "::warning::Validation failed or has errors"
echo "valid=false" >> "$GITHUB_OUTPUT"
# Show per-file errors if present
echo "$response" | jq -r '.files[]? | select((.issues // [] | length > 0) or (.general_errors // [] | length > 0)) | "File: " + .file + " - Issues found"' 2>/dev/null || true
fi
- name: Comment validation results on PR
id: comment
if: steps.validate.outputs.has_changesets == 'true'
uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
with:
script: |
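// Read the structured response written by the validation step; fall back to a schema-shaped error object if it is missing or unparseable.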
const fs = require('fs');
let validation;
try {
const raw = fs.readFileSync('validation_response.json', 'utf8');
validation = JSON.parse(raw);
} catch (e) {
validation = {
overall_valid: false,
files: [{
file: "unknown",
valid: false,
issues: [],
general_errors: ["Failed to parse validation response"],
general_suggestions: []
}]
};
}
// Find existing comment first
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const marker = "## 📝 Changeset Validation Results";
const existingComment = comments.find(c =>
c.user?.type === "Bot" && c.body?.includes(marker)
);
// Count total errors across all files
let totalErrors = 0;
if (validation.files) {
totalErrors = validation.files.reduce((sum, file) => {
let fileErrors = 0;
if (file.issues) {
fileErrors += file.issues.reduce((issueSum, issue) => {
return issueSum + (issue.errors ? issue.errors.length : 0);
}, 0);
}
if (file.general_errors) {
fileErrors += file.general_errors.length;
}
return sum + fileErrors;
}, 0);
}
// Extract validation status - check both overall_valid AND actual errors
const overallValid = validation.overall_valid && totalErrors === 0;
// If validation passes and there are no errors, don't comment
if (overallValid) {
// Delete existing comment if it exists
if (existingComment) {
await github.rest.issues.deleteComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id
});
console.log("Deleted existing comment - validation passed");
}
return; // Don't create a new comment
}
// Only comment if there are actual errors
let comment = "## 📝 Changeset Validation Results\n\n";
comment += "❌ **Changeset validation failed**\n\n";
if (validation.files) {
const failedFiles = validation.files.filter(file => {
if (!file.valid) return true;
// Check for errors in issues array
if (file.issues && file.issues.some(issue => issue.errors && issue.errors.length > 0)) {
return true;
}
// Check for general errors
if (file.general_errors && file.general_errors.length > 0) {
return true;
}
return false;
});
if (failedFiles.length > 0) {
comment += "### Issues Found:\n\n";
// Get the repository URL for proper file linking
const repoUrl = "https://github.com/" + context.repo.owner + "/" + context.repo.repo;
const branch = context.payload.pull_request.head.ref;
failedFiles.forEach(file => {
// Create proper GitHub file link with branch
const fileUrl = repoUrl + "/blob/" + branch + "/" + file.file;
comment += "#### [`" + file.file + "`](" + fileUrl + ")\n\n";
// Display issues grouped by quote
if (file.issues && file.issues.length > 0) {
file.issues.forEach(issue => {
if (issue.quote) {
comment += "**Issue with:** '" + issue.quote + "'\n\n";
if (issue.errors && issue.errors.length > 0) {
if (issue.errors.length === 1) {
comment += "❌ " + issue.errors[0] + "\n\n";
} else {
comment += "❌ **Errors:**\n";
issue.errors.forEach(error => {
comment += " • " + error + "\n";
});
comment += "\n";
}
}
if (issue.suggestions && issue.suggestions.length > 0) {
if (issue.suggestions.length === 1) {
comment += "💡 " + issue.suggestions[0] + "\n\n";
} else {
comment += "💡 **Suggestions:**\n";
issue.suggestions.forEach(suggestion => {
comment += " • " + suggestion + "\n";
});
comment += "\n";
}
}
}
});
}
// Display general errors (not tied to specific quotes)
if (file.general_errors && file.general_errors.length > 0) {
comment += "**General Errors:**\n";
file.general_errors.forEach(error => {
comment += "- " + error + "\n";
});
comment += "\n";
}
// Display general suggestions (not tied to specific quotes)
if (file.general_suggestions && file.general_suggestions.length > 0) {
comment += "**General Suggestions:**\n";
file.general_suggestions.forEach(suggestion => {
comment += "- " + suggestion + "\n";
});
comment += "\n";
}
});
}
}
comment += "---\n";
comment += "*📖 See [Astro's changeset guide](https://contribute.docs.astro.build/docs-for-code-changes/changesets/) for details.*";
// Update or create comment only for failures
if (existingComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existingComment.id,
body: comment
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: comment
});
}
// Set output to indicate validation status without failing the job yet
// This ensures the cache gets saved
if (!overallValid) {
core.setOutput("validation_failed", "true");
console.log("Validation failed - will fail job after cache is saved");
}
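# actions/cache/restore never saves on its own; save explicitly (with always()) so cached responses persist even when validation fails below.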
- name: Save response cache
uses: actions/cache/save@6f8efc29b200d32929f49075959781ed54ec270c # v3.5.0
if: always()
with:
path: /tmp/llm_cache
key: llm-validation-pr-${{ github.event.pull_request.number }}-${{ github.run_id }}
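# Failure is deferred to this final step so the cache save above has already completed.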
- name: Fail job if validation failed
if: steps.comment.outputs.validation_failed == 'true'
run: |
echo "::error::Changeset validation failed - see PR comment for details"
exit 1