diff --git a/.mise/tasks/setup b/.mise/tasks/setup
index 410cbc0..4ccc467 100755
--- a/.mise/tasks/setup
+++ b/.mise/tasks/setup
@@ -5,7 +5,11 @@ echo ""
echo "🍜 Setting up project"
echo ""
-hk install
+# Only run hk install in the main checkout (skip in linked git worktrees, which share hooks)
+if [ "$(git rev-parse --git-dir)" = "$(git rev-parse --git-common-dir)" ]; then
+  hk install
+fi
+
bun install
echo ""
diff --git a/AGENTS.md b/AGENTS.md
index 6b58606..1a21342 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -34,6 +34,7 @@
- **Methods/properties**: camelCase
- **Status strings**: use union types (e.g., `'pending' | 'running' | 'completed' | 'failed' | 'cancelled'`)
- **Explicit types**: prefer explicit type annotations over inference
+- **NEVER USE ANY**: avoid `any` type at all costs
- **Return types**: optional (not required but recommended for public methods)
### Error Handling
diff --git a/README.md b/README.md
index c073e08..7cc4b1e 100644
--- a/README.md
+++ b/README.md
@@ -74,6 +74,103 @@ skill_find "testing -performance"
- Natural query syntax with negation and quoted phrases
- Skill ranking by relevance (name matches weighted higher)
- Silent message insertion (noReply pattern)
+- **Pluggable prompt rendering** with model-aware format selection (XML, JSON, Markdown)
+
+## Prompt Renderer Configuration
+
+The plugin supports **multiple formats for prompt injection**, allowing you to optimize results for different LLM models and use cases.
+
+> See the [Configuration](#configuration) section for complete configuration details, including bunfig setup and global/project-level overrides.
+
+### Supported Formats
+
+Choose the format that works best for your LLM:
+
+| Format | Best For | Characteristics |
+| ----------------- | ---------------------- | ---------------------------------------------------------------------- |
+| **XML** (default) | Claude models | Human-readable, structured, XML-optimized for Claude |
+| **JSON** | GPT and strict parsers | Machine-readable, strict JSON structure, strong parsing support |
+| **Markdown** | All models | Readable prose, heading-based structure, easy to read in conversations |
+
+### Configuration Syntax
+
+Set your preferences in `.opencode-skillful.json`:
+
+```json
+{
+ "promptRenderer": "xml",
+ "modelRenderers": {
+ "claude-3-5-sonnet": "xml",
+ "gpt-4": "json",
+ "gpt-4-turbo": "json"
+ }
+}
+```
+
+**How It Works:**
+
+1. Every tool execution checks the current active LLM model
+2. If `modelRenderers[modelID]` is configured, that format is used
+3. Otherwise, the global `promptRenderer` default is used
+4. Results are rendered in the selected format and injected into the prompt
+
+### Format Output Examples
+
+#### XML Format (Claude Optimized)
+
+```xml
+<Skill>
+  <name>git-commits</name>
+  <description>Guidelines for writing effective git commit messages</description>
+  <toolName>writing_git_commits</toolName>
+</Skill>
+```
+
+**Advantages:**
+
+- Matches Claude's native instruction format
+- Clear tag-based structure
+- Excellent readability for complex nested data
+
+#### JSON Format (GPT Optimized)
+
+```json
+{
+  "Skill": {
+    "name": "git-commits",
+    "description": "Guidelines for writing effective git commit messages",
+    "toolName": "writing_git_commits"
+  }
+}
+```
+
+**Advantages:**
+
+- Strong parsing support across LLMs
+- Strict, validated structure
+- Familiar format for language models trained on JSON data
+
+#### Markdown Format (Human Readable)
+
+```markdown
+# Skill
+
+### name
+
+- **name**: _git-commits_
+
+### description
+
+- **description**: _Guidelines for writing effective git commit messages_
+
+### toolName
+
+- **toolName**: _writing_git_commits_
+```
+
+**Advantages:**
+
+- Most readable in conversations
+- Natural language-friendly
+- Works well for exploratory workflows
## Skill Discovery Paths
@@ -354,26 +451,133 @@ Non-zero exit codes indicate script failures. Always check STDERR and the exit c
## Configuration
-The plugin reads configuration from the OpenCode config file (`~/.config/opencode/config.json`):
+The plugin loads configuration via **bunfig**, supporting both project-local and global configuration files:
+
+### Configuration Files
+
+Configuration is loaded in this priority order (highest priority last):
+
+1. **Global config** (standard platform locations):
+ - Linux/macOS: `~/.config/opencode-skillful/config.json`
+ - Windows: `%APPDATA%/opencode-skillful/config.json`
+
+2. **Project config** (in your project root):
+ - `.opencode-skillful.json`
+
+Later configuration files override earlier ones. Use project-local `.opencode-skillful.json` to override global settings for specific projects.
+
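+Internally, the plugin resolves these files with bunfig's `loadConfig`; the sketch below mirrors `src/config.ts` from this change (the inline defaults are simplified for illustration):
+
+```typescript
+import { loadConfig } from 'bunfig';
+
+// bunfig looks up the named config in the global config directory and the
+// project root, then merges whatever it finds over these defaults.
+const config = await loadConfig({
+  name: 'opencode-skillful', // resolves .opencode-skillful.json / config.json
+  cwd: './',
+  defaultConfig: {
+    debug: false,
+    basePaths: [], // the real defaults point at the standard skills directories (see src/config.ts)
+    promptRenderer: 'xml',
+    modelRenderers: {},
+  },
+});
+```
+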
+### Configuration Options
+
+#### Plugin Installation
+
+First, register the plugin in your OpenCode config (`~/.config/opencode/config.json`):
```json
{
- "plugins": ["@zenobius/opencode-skillful"],
- "skillful": {
- "debug": false,
- "basePaths": ["~/.config/opencode/skills", ".opencode/skills"]
- }
+ "plugins": ["@zenobius/opencode-skillful"]
}
```
-### Configuration Options
+#### Skill Discovery Configuration
+
+Create `.opencode-skillful.json` in your project root or global config directory:
+
+```json
+{
+ "debug": false,
+ "basePaths": ["~/.config/opencode/skills", ".opencode/skills"],
+ "promptRenderer": "xml",
+ "modelRenderers": {}
+}
+```
+
+**Configuration Fields:**
- **debug** (boolean, default: `false`): Enable debug output showing skill discovery stats
- - When enabled, `skill_find` includes discovered, parsed, rejected, and duplicate counts
- - Useful for diagnosing skill loading issues
+ - When enabled, `skill_find` responses include discovered, parsed, rejected, and error counts
+ - Useful for diagnosing skill loading and parsing issues
+
- **basePaths** (array, default: standard locations): Custom skill search directories
- - Paths are searched in order; later paths override earlier ones for duplicate skill names
+ - Paths are searched in priority order; later paths override earlier ones for duplicate skill names
+ - Default: `[~/.config/opencode/skills, .opencode/skills]`
- Use project-local `.opencode/skills/` for project-specific skills
+ - Platform-aware paths: automatically resolves to XDG, macOS, or Windows standard locations
+
+- **promptRenderer** (string, default: `'xml'`): Default format for prompt injection
+ - Options: `'xml'` | `'json'` | `'md'`
+ - XML (default): Claude-optimized, human-readable structured format
+ - JSON: GPT-optimized, strict JSON formatting for strong parsing models
+ - Markdown: Human-readable format with headings and nested lists
+ - Used when no model-specific renderer is configured
+
+- **modelRenderers** (object, default: `{}`): Per-model format overrides
+ - Maps model IDs to preferred formats
+ - Overrides global `promptRenderer` for specific models
+ - Example: `{ "gpt-4": "json", "claude-3-5-sonnet": "xml" }`
+
+### How Renderer Selection Works
+
+When any tool executes (`skill_find`, `skill_use`, `skill_resource`):
+
+1. The plugin looks up the model recorded for the current message (tracked via OpenCode's `chat.message` hook)
+2. Builds a list of model candidates to check, from most to least specific:
+ - Full model ID (e.g., `"anthropic-claude-3-5-sonnet"`)
+ - Generic model pattern (e.g., `"claude-3-5-sonnet"`)
+3. Checks if any candidate exists in `modelRenderers` configuration
+ - First match wins (most specific takes precedence)
+ - If found, uses that format
+4. If no match in `modelRenderers`, falls back to `promptRenderer` default
+5. Renders the results in the selected format and injects into the prompt
+
+**Example**: If your config has `"claude-3-5-sonnet": "xml"` and the active model is `"anthropic-claude-3-5-sonnet"`, the plugin will:
+
+- Try matching `"anthropic-claude-3-5-sonnet"` (no match)
+- Try matching `"claude-3-5-sonnet"` (match found! Use XML)
+- Return `"xml"` format
+
+This allows different models to receive results in their preferred format without needing to specify every model variant. Configure the generic model name once and it works for all provider-prefixed variations.
+
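+For reference, the lookup is roughly the sketch below (mirroring `src/lib/getModelFormat.ts` in this change; the `resolveFormat` name is illustrative):
+
+```typescript
+type Format = 'json' | 'xml' | 'md';
+
+// Build candidate keys from most to least specific and take the first override that matches.
+function resolveFormat(
+  providerId: string | undefined,
+  modelId: string | undefined,
+  config: { promptRenderer: Format; modelRenderers?: Record<string, Format> }
+): Format {
+  const overrides = config.modelRenderers ?? {};
+  const candidates = [
+    providerId && modelId ? `${providerId}-${modelId}` : undefined, // e.g. "anthropic-claude-3-5-sonnet"
+    modelId, // e.g. "claude-3-5-sonnet"
+  ];
+
+  for (const key of candidates) {
+    if (!key) continue;
+    const override = overrides[key];
+    if (override) return override; // first (most specific) match wins
+  }
+
+  return config.promptRenderer; // global default
+}
+```
+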
+### Example Configurations
+
+#### Global Configuration for Multi-Model Setup
+
+`~/.config/opencode-skillful/config.json`:
+
+```json
+{
+ "debug": false,
+ "promptRenderer": "xml",
+ "modelRenderers": {
+ "claude-3-5-sonnet": "xml",
+ "claude-3-opus": "xml",
+ "gpt-4": "json",
+ "gpt-4-turbo": "json",
+ "llama-2-70b": "md"
+ }
+}
+```
+
+#### Project-Specific Override
+
+`.opencode-skillful.json` (project root):
+
+```json
+{
+ "debug": true,
+ "basePaths": ["~/.config/opencode/skills", ".opencode/skills", "./vendor/skills"],
+ "promptRenderer": "xml",
+ "modelRenderers": {
+ "gpt-4": "json"
+ }
+}
+```
+
+This project-local config:
+
+- Enables debug output for troubleshooting
+- Adds a custom vendor skills directory
+- Uses JSON format specifically for GPT-4 when it's the active model
+- Falls back to XML for all other models
## Architecture
diff --git a/bun.lock b/bun.lock
index e1432ce..6c86829 100644
--- a/bun.lock
+++ b/bun.lock
@@ -6,6 +6,7 @@
"name": "@zenobius/opencode-skillful",
"dependencies": {
"@opencode-ai/plugin": "1.0.85",
+ "bunfig": "^0.15.6",
"env-paths": "^3.0.0",
"gray-matter": "^4.0.3",
"mime": "^4.1.0",
@@ -179,6 +180,10 @@
"@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.53.3", "", { "os": "win32", "cpu": "x64" }, "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ=="],
+ "@stacksjs/clapp": ["@stacksjs/clapp@0.2.0", "", { "dependencies": { "mri": "^1.2.0", "wrap-ansi": "^9.0.0" }, "bin": { "clapp": "dist/bin/cli.js", "@stacksjs/clapp": "dist/bin/cli.js" } }, "sha512-dSqnbeZjXnQLLvVxC5NU7D9Vpjxc6cC9Bo2ZwaqjgruK7pbVoFCI0goc9Mtf/lfSTbTx6Uvv/mbY7+cOW/j3Og=="],
+
+ "@stacksjs/clarity": ["@stacksjs/clarity@0.3.24", "", { "bin": { "clarity": "dist/bin/cli.js" } }, "sha512-QN21fT/9dovcuFTkni9LFHDzBpiBZ4Q//0a3vFJsckPiblNIu1RhwwePkkTK4j6Xu2DtVYGR60/9Scdrp6wRfw=="],
+
"@types/bun": ["@types/bun@1.3.5", "", { "dependencies": { "bun-types": "1.3.5" } }, "sha512-RnygCqNrd3srIPEWBd5LFeUYG7plCoH2Yw9WaZGyNmdTEei+gWaHqydbaIRkIkcbXwhBT94q78QljxN0Sk838w=="],
"@types/chai": ["@types/chai@5.2.3", "", { "dependencies": { "@types/deep-eql": "*", "assertion-error": "^2.0.1" } }, "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA=="],
@@ -233,6 +238,8 @@
"ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="],
+ "ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="],
+
"ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="],
"argparse": ["argparse@1.0.10", "", { "dependencies": { "sprintf-js": "~1.0.2" } }, "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg=="],
@@ -247,6 +254,8 @@
"bun-types": ["bun-types@1.3.5", "", { "dependencies": { "@types/node": "*" } }, "sha512-inmAYe2PFLs0SUbFOWSVD24sg1jFlMPxOjOSSCYqUgn4Hsc3rDc7dFvfVYjFPNHtov6kgUeulV4SxbuIV/stPw=="],
+ "bunfig": ["bunfig@0.15.6", "", { "dependencies": { "@stacksjs/clapp": "^0.2.0", "@stacksjs/clarity": "^0.3.24" }, "bin": { "bunfig": "bin/cli.js" } }, "sha512-7ynPmrn1dN5F+0DtUVY0Vo2MZOOnSdb6hpQePwABEYIJ+d/rSb3vaOVUs3MFxwxWuaVc1FEStVJG6+kCgbLuyg=="],
+
"cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="],
"callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="],
@@ -271,6 +280,8 @@
"deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="],
+ "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="],
+
"env-paths": ["env-paths@3.0.0", "", {}, "sha512-dtJUTepzMW3Lm/NPxRf3wP4642UWhjL2sQxc+ym2YMj1m/H2zDNQOlezafzkHwn6sMstjHTwG6iQQsctDW/b1A=="],
"es-module-lexer": ["es-module-lexer@1.7.0", "", {}, "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA=="],
@@ -333,6 +344,8 @@
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
+ "get-east-asian-width": ["get-east-asian-width@1.4.0", "", {}, "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q=="],
+
"glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="],
"glob-to-regex.js": ["glob-to-regex.js@1.2.0", "", { "peerDependencies": { "tslib": "2" } }, "sha512-QMwlOQKU/IzqMUOAZWubUOT8Qft+Y0KQWnX9nK3ch0CJg0tTp4TvGZsTfudYKv2NzoQSyPcnA6TYeIQ3jGichQ=="],
@@ -397,6 +410,8 @@
"minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="],
+ "mri": ["mri@1.2.0", "", {}, "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA=="],
+
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
"nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],
@@ -465,6 +480,10 @@
"std-env": ["std-env@3.10.0", "", {}, "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg=="],
+ "string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="],
+
+ "strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="],
+
"strip-bom-string": ["strip-bom-string@1.0.0", "", {}, "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g=="],
"strip-json-comments": ["strip-json-comments@3.1.1", "", {}, "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="],
@@ -523,6 +542,8 @@
"word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="],
+ "wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="],
+
"yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="],
"zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
@@ -555,6 +576,8 @@
"typescript-eslint/@typescript-eslint/utils": ["@typescript-eslint/utils@8.48.1", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.48.1", "@typescript-eslint/types": "8.48.1", "@typescript-eslint/typescript-estree": "8.48.1" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA=="],
+ "wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="],
+
"@eslint/eslintrc/js-yaml/argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="],
"@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="],
diff --git a/bunfig.config.ts b/bunfig.config.ts
new file mode 100644
index 0000000..eaf727e
--- /dev/null
+++ b/bunfig.config.ts
@@ -0,0 +1,37 @@
+/**
+ * Bunfig Configuration - Plugin Settings Schema
+ *
+ * WHY: Bunfig provides typed configuration loading with validation.
+ * It allows us to:
+ * - Define a schema for all plugin settings (type-safe)
+ * - Load from multiple sources (.opencode-skillful.json, ~/.config/opencode-skillful/config.json)
+ * - Merge with sensible defaults
+ * - Support model-aware prompt renderer selection
+ */
+
+export default {
+ debug: false,
+ basePaths: [] as string[],
+ promptRenderer: 'xml' as const,
+
+ /**
+ * Model-specific renderer overrides
+ *
+ * WHY: Different LLM models have different preferences and strengths:
+ * - Claude models: trained on XML, prefer structured formats
+ * - GPT models: strong JSON parsing, prefer JSON
+ * - Other models: may benefit from markdown readability
+ *
+ * Structure: Record<modelID, format>
+ * - modelID: The model identifier from OpenCode (e.g., 'claude-3-5-sonnet')
+ * - format: Preferred format for that model ('json' | 'xml' | 'md')
+ *
+ * Example:
+ * modelRenderers: {
+ * 'claude-3-5-sonnet': 'xml',
+ * 'gpt-4': 'json',
+ * 'llama-2': 'md',
+ * }
+ */
+ modelRenderers: {} as Record<string, 'json' | 'xml' | 'md'>,
+};
diff --git a/package.json b/package.json
index 53b7f9a..75c2a16 100644
--- a/package.json
+++ b/package.json
@@ -20,6 +20,7 @@
"types": "dist/index.d.ts",
"dependencies": {
"@opencode-ai/plugin": "1.0.85",
+ "bunfig": "^0.15.6",
"env-paths": "^3.0.0",
"gray-matter": "^4.0.3",
"mime": "^4.1.0",
diff --git a/src/api.ts b/src/api.ts
index 4aba9f7..6aba7d5 100644
--- a/src/api.ts
+++ b/src/api.ts
@@ -1,7 +1,7 @@
/**
* API Factory - Plugin Initialization and Tool Creation
*
- * WHY: Centralizes the creation of all plugin components (logger, registry, tools)
+ * WHY: Centralizes the creation of all plugin components (logger, registry, tools, renderers)
* in one place. Makes it easy to test different configurations, mock components,
* and understand the complete initialization flow.
*
@@ -9,10 +9,12 @@
* - Logger (for debug output)
* - SkillRegistry (for discovery and parsing)
* - Tool creators (functions that create skill_find, skill_use, skill_resource)
+ * - PromptRenderer (for format selection and rendering)
*
* INITIALIZATION TIMING (CRITICAL):
* - createLogger(): synchronous, immediate
* - createSkillRegistry(): synchronous factory call (returns a SkillRegistry object)
+ * - createPromptRenderer(): synchronous, immediate (format selection at runtime)
* - registry.initialise(): NOT called here, caller must do this separately
*
* WHY NOT CALL initialise(): The caller (index.ts) needs to await initialise()
@@ -21,13 +23,14 @@
* RETURN VALUE: Object with:
* - registry: SkillRegistry instance (must call .initialise() before use)
* - logger: PluginLogger for debug output
+ * - config: PluginConfig (needed for model-aware format selection)
* - findSkills: Tool creator function for skill search
* - readResource: Tool creator function for resource reading
* - loadSkill: Tool creator function for skill loading
*
* EXAMPLE:
* const api = await createApi(config);
- * const { registry, findSkills, readResource, loadSkill } = api;
+ * const { registry, config, findSkills, readResource, loadSkill } = api;
* // Note: registry is created but NOT yet initialized
* // Must be done by caller: await registry.initialise()
*/
@@ -37,7 +40,7 @@ import { createSkillRegistry } from './services/SkillRegistry';
import { createSkillFinder } from './tools/SkillFinder';
import { createSkillResourceReader } from './tools/SkillResourceReader';
import { createSkillLoader } from './tools/SkillUser';
-import { PluginConfig } from './types';
+import type { PluginConfig } from './types';
export const createApi = async (config: PluginConfig) => {
const logger = createLogger(config);
@@ -46,6 +49,7 @@ export const createApi = async (config: PluginConfig) => {
return {
registry,
logger,
+ config,
findSkills: createSkillFinder(registry),
readResource: createSkillResourceReader(registry),
loadSkill: createSkillLoader(registry),
diff --git a/src/config.ts b/src/config.ts
index ce05027..cc818ab 100644
--- a/src/config.ts
+++ b/src/config.ts
@@ -1,5 +1,5 @@
/**
- * Plugin Configuration - Skill Discovery Path Resolution
+ * Plugin Configuration - Skill Discovery and Prompt Rendering
*
* WHY: Skills can be stored in two places:
* 1. User-global: ~/.opencode/skills/ (or platform equivalent)
@@ -23,26 +23,43 @@
* - Windows: %APPDATA%/opencode/skills/
* Without this, hard-coding ~/.opencode/ fails on non-Unix systems.
*
+ * PROMPT RENDERER CONFIGURATION:
+ * - promptRenderer: Default format for prompt injection ('xml' | 'json' | 'md')
+ * - modelRenderers: Per-model format overrides (optional)
+ * - Loaded via bunfig from .opencode-skillful.json or ~/.config/opencode-skillful/config.json
+ *
* @param ctx PluginInput from OpenCode runtime (provides working directory)
- * @returns Promise with resolved basePaths array and debug flag
+ * @returns Promise with resolved paths, debug flag, and renderer config
*/
+import type { Config } from 'bunfig';
+import { loadConfig } from 'bunfig';
import type { PluginInput } from '@opencode-ai/plugin';
import { join } from 'node:path';
-import { mergeDeepLeft } from 'ramda';
-import type { PluginConfig } from './types';
import envPaths from 'env-paths';
+import { PluginConfig } from './types';
export const OpenCodePaths = envPaths('opencode', { suffix: '' });
-export async function getPluginConfig(ctx: PluginInput): Promise<PluginConfig> {
- const base = {
+const options: Config<PluginConfig> = {
+ name: 'opencode-skillful',
+ cwd: './',
+ defaultConfig: {
debug: false,
basePaths: [
join(OpenCodePaths.config, 'skills'), // Lowest priority: Standard User Config (windows)
- join(ctx.directory, '.opencode', 'skills'), // Highest priority: Project-local
],
- };
+ promptRenderer: 'xml',
+ modelRenderers: {},
+ },
+};
+
+export async function getPluginConfig(ctx: PluginInput) {
+ const resolvedConfig = await loadConfig(options);
+
+ resolvedConfig.basePaths.push(
+ join(ctx.directory, '.opencode', 'skills') // Highest priority: Project-local
+ );
- return mergeDeepLeft({}, base);
+ return resolvedConfig;
}
diff --git a/src/index.ts b/src/index.ts
index 40e1764..f946fdf 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -11,6 +11,7 @@
* - find_skills(): Search for skills by free-text query
* - Delivers skill content via silent message insertion (noReply pattern)
* - Supports nested skills with proper naming
+ * - Supports multiple prompt formats (XML, JSON, Markdown) with model-aware selection
*
* Design Decisions:
* - Consolidates 50+ individual skill tools into 2 unified tools (cleaner namespace)
@@ -20,6 +21,7 @@
* - Message insertion pattern ensures skill content persists (user messages not purged)
* - Base directory context enables relative path resolution
* - Skills require restart to reload (acceptable trade-off)
+ * - Prompt format selection: model-aware via modelRenderers config, default XML
*
* @see https://github.com/anthropics/skills
*/
@@ -29,16 +31,43 @@ import { tool, ToolContext, type Plugin } from '@opencode-ai/plugin';
import { createInstructionInjector } from './lib/OpenCodeChat';
import { createApi } from './api';
import { getPluginConfig } from './config';
-import { jsonToXml } from './lib/xml';
+import { createPromptRenderer } from './lib/createPromptRenderer';
+import { getModelFormat } from './lib/getModelFormat';
+import { createMessageModelIdAccountant } from './services/MessageModelIdAccountant';
export const SkillsPlugin: Plugin = async (ctx) => {
const config = await getPluginConfig(ctx);
const api = await createApi(config);
const sendPrompt = createInstructionInjector(ctx);
+ const promptRenderer = createPromptRenderer();
+ const modelIdAccountant = createMessageModelIdAccountant();
api.registry.initialise();
return {
+ 'chat.message': async (input) => {
+ if (!input.messageID || !input.model?.providerID || !input.model?.modelID) {
+ return;
+ }
+
+ // Track model usage per message
+ modelIdAccountant.track({
+ messageID: input.messageID,
+ providerID: input.model.providerID,
+ modelID: input.model.modelID,
+ sessionID: input.sessionID,
+ });
+ },
+ async event(args) {
+ switch (args.event.type) {
+ case 'message.removed':
+ modelIdAccountant.untrackMessage(args.event.properties);
+ break;
+ case 'session.deleted':
+ modelIdAccountant.untrackSession(args.event.properties.info.id);
+ break;
+ }
+ },
tool: {
skill_use: tool({
description:
@@ -49,9 +78,23 @@ export const SkillsPlugin: Plugin = async (ctx) => {
.describe('An array of skill names to load.'),
},
execute: async (args, toolCtx: ToolContext) => {
+ const messageID = toolCtx.messageID;
+ const sessionID = toolCtx.sessionID;
+ const modelInfo = modelIdAccountant.getModelInfo({ messageID, sessionID });
+
+ // Resolve the appropriate format for the current model
+ const format = getModelFormat({
+ modelId: modelInfo?.modelID,
+ providerId: modelInfo?.providerID,
+ config,
+ });
+ const renderer = promptRenderer.getFormatter(format);
+
const results = await api.loadSkill(args.skill_names);
for await (const skill of results.loaded) {
- await sendPrompt(jsonToXml(skill, 'Skill'), { sessionId: toolCtx.sessionID });
+ await sendPrompt(renderer(skill, 'Skill'), {
+ sessionId: toolCtx.sessionID,
+ });
}
return JSON.stringify({
@@ -68,9 +111,21 @@ export const SkillsPlugin: Plugin = async (ctx) => {
.union([tool.schema.string(), tool.schema.array(tool.schema.string())])
.describe('The search query string or array of strings.'),
},
- execute: async (args) => {
+ execute: async (args, toolCtx: ToolContext) => {
+ const messageID = toolCtx.messageID;
+ const sessionID = toolCtx.sessionID;
+ const modelInfo = modelIdAccountant.getModelInfo({ messageID, sessionID });
+
+ // Resolve the appropriate format for the current model
+ const format = getModelFormat({
+ config,
+ modelId: modelInfo?.modelID,
+ providerId: modelInfo?.providerID,
+ });
+ const renderer = promptRenderer.getFormatter(format);
+
const results = await api.findSkills(args);
- const output = jsonToXml(results, 'SkillSearchResults');
+ const output = renderer(results, 'SkillSearchResults');
return output;
},
}),
@@ -84,12 +139,27 @@ export const SkillsPlugin: Plugin = async (ctx) => {
.describe('The relative path to the resource file within the skill directory.'),
},
execute: async (args, toolCtx: ToolContext) => {
+ const messageID = toolCtx.messageID;
+ const sessionID = toolCtx.sessionID;
+ const modelInfo = modelIdAccountant.getModelInfo({ messageID, sessionID });
+
+ // Resolve the appropriate format for the current model
+ const format = getModelFormat({
+ config,
+ modelId: modelInfo?.modelID,
+ providerId: modelInfo?.providerID,
+ });
+
+ const renderer = promptRenderer.getFormatter(format);
+
const result = await api.readResource(args);
if (!result.injection) {
throw new Error('Failed to read resource');
}
- await sendPrompt(jsonToXml(result.injection), { sessionId: toolCtx.sessionID });
+ await sendPrompt(renderer(result.injection, 'Resource'), {
+ sessionId: toolCtx.sessionID,
+ });
return JSON.stringify({
result: 'Resource injected successfully',
diff --git a/src/lib/PromptRenderer.ts b/src/lib/PromptRenderer.ts
new file mode 100644
index 0000000..7e5493f
--- /dev/null
+++ b/src/lib/PromptRenderer.ts
@@ -0,0 +1,33 @@
+/**
+ * PromptRenderer Interface - Provider Pattern for Prompt Injection Formatting
+ *
+ * WHY: Different LLM models have different strengths and preferences for structured data:
+ * - Claude models: trained extensively on XML, prefer structured XML injection
+ * - GPT models: strong JSON parsing, prefer JSON-formatted data
+ * - Other models: may benefit from markdown readability for better context
+ *
+ * The provider pattern allows selecting the appropriate renderer at runtime based on:
+ * - Model preference (configured in modelRenderers)
+ * - Global default (promptRenderer)
+ * - Model detection via the chat.message hook (tracked by MessageModelIdAccountant)
+ *
+ * This abstraction decouples rendering format from tool execution logic,
+ * enabling easy format additions without changing plugin code.
+ */
+
+export interface PromptRenderer {
+ /**
+ * Render an object to a string using the preferred format
+ *
+ * @param data The object to render (typically skill metadata or search results)
+ * @param rootElement Optional element name (used for XML rendering as root tag)
+ * @returns Formatted string ready for prompt injection
+ */
+ render(data: object, rootElement?: string): string;
+
+ /**
+ * The format identifier for this renderer
+ * Used for logging, debugging, and format selection
+ */
+ readonly format: 'json' | 'xml' | 'md';
+}
diff --git a/src/lib/createPromptRenderer.test.ts b/src/lib/createPromptRenderer.test.ts
new file mode 100644
index 0000000..8a6a313
--- /dev/null
+++ b/src/lib/createPromptRenderer.test.ts
@@ -0,0 +1,72 @@
+/**
+ * createPromptRenderer Factory Tests
+ *
+ * Test coverage:
+ * - Correct renderer instantiation for each format
+ * - Invalid format error handling
+ * - Format identifier verification
+ */
+
+import { describe, it, expect } from 'vitest';
+import { createPromptRenderer } from './createPromptRenderer';
+
+describe('createPromptRenderer', () => {
+ it('should return an object with getFormatter method', () => {
+ const promptRenderer = createPromptRenderer();
+
+ expect(promptRenderer).toBeDefined();
+ expect(promptRenderer.getFormatter).toBeDefined();
+ expect(typeof promptRenderer.getFormatter).toBe('function');
+ });
+
+ it('should return a formatter function for json format', () => {
+ const promptRenderer = createPromptRenderer();
+ const formatter = promptRenderer.getFormatter('json');
+
+ expect(formatter).toBeDefined();
+ expect(typeof formatter).toBe('function');
+ });
+
+ it('should return a formatter function for xml format', () => {
+ const promptRenderer = createPromptRenderer();
+ const formatter = promptRenderer.getFormatter('xml');
+
+ expect(formatter).toBeDefined();
+ expect(typeof formatter).toBe('function');
+ });
+
+ it('should return a formatter function for md format', () => {
+ const promptRenderer = createPromptRenderer();
+ const formatter = promptRenderer.getFormatter('md');
+
+ expect(formatter).toBeDefined();
+ expect(typeof formatter).toBe('function');
+ });
+
+ it('should throw error for unknown format', () => {
+ const promptRenderer = createPromptRenderer();
+ // Type assertion needed to test invalid input
+ const invalidFormat = 'invalid' as 'json' | 'xml' | 'md';
+
+ expect(() => promptRenderer.getFormatter(invalidFormat)).toThrow();
+ });
+
+ it('should create formatters for all supported formats', () => {
+ const promptRenderer = createPromptRenderer();
+ const formats: Array<'json' | 'xml' | 'md'> = ['json', 'xml', 'md'];
+
+ for (const format of formats) {
+ const formatter = promptRenderer.getFormatter(format);
+ expect(formatter).toBeDefined();
+ expect(typeof formatter).toBe('function');
+ }
+ });
+
+ it('should return consistent formatter instances', () => {
+ const promptRenderer = createPromptRenderer();
+ const formatter1 = promptRenderer.getFormatter('json');
+ const formatter2 = promptRenderer.getFormatter('json');
+
+ expect(formatter1).toBe(formatter2);
+ });
+});
diff --git a/src/lib/createPromptRenderer.ts b/src/lib/createPromptRenderer.ts
new file mode 100644
index 0000000..21c9356
--- /dev/null
+++ b/src/lib/createPromptRenderer.ts
@@ -0,0 +1,44 @@
+/**
+ * Prompt Renderer Factory
+ *
+ * WHY: Factory pattern centralizes renderer instantiation and makes it easy to:
+ * - Add new renderer types in the future
+ * - Test with different renderers
+ * - Handle invalid formats gracefully
+ */
+
+import { createJsonPromptRenderer } from './renderers/JsonPromptRenderer';
+import { createXmlPromptRenderer } from './renderers/XmlPromptRenderer';
+import { createMdPromptRenderer } from './renderers/MdPromptRenderer';
+
+/**
+ * Create the prompt renderer registry
+ *
+ * @returns An object exposing getFormatter(format), which returns the render
+ *          function for 'json' | 'xml' | 'md' and throws if the format is not recognized
+ */
+export function createPromptRenderer() {
+ const renderers = {
+ json: createJsonPromptRenderer(),
+ xml: createXmlPromptRenderer(),
+ md: createMdPromptRenderer(),
+ };
+
+ const getFormatter = (format: 'json' | 'xml' | 'md') => {
+ switch (format) {
+ case 'json':
+ return renderers.json.render;
+ case 'xml':
+ return renderers.xml.render;
+ case 'md':
+ return renderers.md.render;
+ default:
+ throw new Error(`Unsupported format: ${format}`);
+ }
+ };
+
+ return {
+ getFormatter,
+ };
+}
diff --git a/src/lib/getModelFormat.ts b/src/lib/getModelFormat.ts
new file mode 100644
index 0000000..5d30f50
--- /dev/null
+++ b/src/lib/getModelFormat.ts
@@ -0,0 +1,56 @@
+/**
+ * Model Format Resolver - Select renderer based on active LLM model
+ *
+ * WHY: Different LLM models have different preferences and strengths:
+ * - Claude models: optimized for XML, prefer structured XML injection
+ * - GPT models: strong JSON parsing, prefer JSON-formatted data
+ * - Other models: may benefit from markdown readability
+ *
+ * This function detects the active model and selects the configured
+ * format preference for that model, falling back to progressively more
+ * generic model patterns.
+ *
+ * HOW IT WORKS:
+ * 1. The caller supplies the active model's provider and model IDs
+ *    (tracked per message via the chat.message hook / MessageModelIdAccountant)
+ * 2. Try matching keys in modelRenderers, from most to least specific:
+ *    - Provider-prefixed model ID (e.g., "anthropic-claude-3-5-sonnet")
+ *    - Bare model ID (e.g., "claude-3-5-sonnet")
+ * 3. If no match, fall back to the promptRenderer default
+ */
+
+import type { PluginConfig } from '../types';
+
+/**
+ * Resolve the appropriate prompt format for the current model
+ *
+ * @param args An object containing:
+ * - modelId?: The identifier of the active model (e.g., "claude-3-5-sonnet")
+ * - providerId?: The identifier of the model provider (e.g., "anthropic")
+ * - config: The plugin configuration (has promptRenderer and modelRenderers)
+ * @returns The format to use: 'json' | 'xml' | 'md'
+ */
+export function getModelFormat(args: {
+ modelId?: string;
+ providerId?: string;
+ config: PluginConfig;
+}): 'json' | 'xml' | 'md' {
+ const { modelId, providerId, config } = args;
+ const modelRenderers = config.modelRenderers ?? {};
+
+ // Prefer full provider-model key when both are defined
+ if (providerId && modelId) {
+ const combinedKey = `${providerId}-${modelId}`;
+ if (combinedKey in modelRenderers) {
+ return modelRenderers[combinedKey];
+ }
+ }
+
+ // Fallback to model-only key when modelId is defined
+ if (modelId && modelId in modelRenderers) {
+ return modelRenderers[modelId];
+ }
+
+ // Final fallback to default promptRenderer
+ return config.promptRenderer;
+}
diff --git a/src/lib/renderers/JsonPromptRenderer.test.ts b/src/lib/renderers/JsonPromptRenderer.test.ts
new file mode 100644
index 0000000..492ea31
--- /dev/null
+++ b/src/lib/renderers/JsonPromptRenderer.test.ts
@@ -0,0 +1,118 @@
+/**
+ * JsonPromptRenderer Tests
+ *
+ * Test coverage:
+ * - Basic JSON output formatting
+ * - Indentation and spacing
+ * - Object and array handling
+ * - Nested structures
+ * - Special values (null, undefined, numbers, booleans)
+ */
+
+import { describe, it, expect } from 'vitest';
+import { createJsonPromptRenderer } from './JsonPromptRenderer';
+
+describe('JsonPromptRenderer', () => {
+ const renderer = createJsonPromptRenderer();
+
+ it('should have json format identifier', () => {
+ expect(renderer.format).toBe('json');
+ });
+
+ it('should render simple object with proper indentation', () => {
+ const data = { name: 'test', value: 42 };
+ const result = renderer.render(data, 'Test');
+
+ expect(result).toContain('"name": "test"');
+ expect(result).toContain('"value": 42');
+ expect(result).toContain(' '); // Check for indentation
+ });
+
+ it('should render nested objects', () => {
+ const data = {
+ user: {
+ name: 'Alice',
+ profile: {
+ age: 30,
+ },
+ },
+ };
+ const result = renderer.render(data, 'User');
+
+ expect(result).toContain('"user"');
+ expect(result).toContain('"profile"');
+ expect(result).toContain('"age": 30');
+ });
+
+ it('should render arrays', () => {
+ const data = {
+ items: ['apple', 'banana', 'cherry'],
+ };
+ const result = renderer.render(data, 'Items');
+
+ expect(result).toContain('"apple"');
+ expect(result).toContain('"banana"');
+ expect(result).toContain('"cherry"');
+ });
+
+ it('should handle null and undefined values', () => {
+ const data = {
+ name: 'test',
+ value: null,
+ optional: undefined,
+ };
+ const result = renderer.render(data, 'Test');
+
+ expect(result).toContain('"name": "test"');
+ });
+
+ it('should handle boolean values', () => {
+ const data = {
+ active: true,
+ archived: false,
+ };
+ const result = renderer.render(data, 'Flags');
+
+ expect(result).toContain('true');
+ expect(result).toContain('false');
+ });
+
+ it('should handle numeric values', () => {
+ const data = {
+ count: 42,
+ ratio: 3.14,
+ negative: -10,
+ };
+ const result = renderer.render(data, 'Numbers');
+
+ expect(result).toContain('42');
+ expect(result).toContain('3.14');
+ expect(result).toContain('-10');
+ });
+
+ it('should wrap output in root element', () => {
+ const data = { name: 'test' };
+ const result = renderer.render(data, 'Skill');
+
+ expect(result).toContain('"Skill"');
+ });
+
+ it('should use default root element', () => {
+ const data = { name: 'test' };
+ const result = renderer.render(data);
+
+ expect(result).toContain('"root"');
+ });
+
+ it('should properly format for readability', () => {
+ const data = {
+ name: 'example',
+ items: [1, 2, 3],
+ };
+ const result = renderer.render(data, 'Example');
+
+ // Check that output is properly formatted (has newlines and indentation)
+ expect(result).toMatch(/\n/);
+ expect(result).toMatch(/[ ]{2}/);
+ });
+});
diff --git a/src/lib/renderers/JsonPromptRenderer.ts b/src/lib/renderers/JsonPromptRenderer.ts
new file mode 100644
index 0000000..8c3a477
--- /dev/null
+++ b/src/lib/renderers/JsonPromptRenderer.ts
@@ -0,0 +1,20 @@
+/**
+ * JsonPromptRenderer - Format objects as JSON
+ *
+ * WHY: Some LLM models (especially GPT family) have strong JSON parsing
+ * and prefer structured JSON data over XML for reliability and clarity.
+ */
+
+import type { PromptRenderer } from '../PromptRenderer';
+
+export const createJsonPromptRenderer = (): PromptRenderer => {
+ const format = 'json' as const;
+ const render = (data: object, rootElement = 'root'): string => {
+ return JSON.stringify({ [rootElement]: data }, null, 2);
+ };
+
+ return {
+ format,
+ render,
+ };
+};
diff --git a/src/lib/renderers/MdPromptRenderer.test.ts b/src/lib/renderers/MdPromptRenderer.test.ts
new file mode 100644
index 0000000..513832b
--- /dev/null
+++ b/src/lib/renderers/MdPromptRenderer.test.ts
@@ -0,0 +1,140 @@
+/**
+ * MdPromptRenderer Tests
+ *
+ * Test coverage:
+ * - Heading generation (H3 for top-level, H4 for nested)
+ * - List item rendering with emphasis
+ * - Array handling as nested bullets
+ * - HTML character escaping
+ * - Nested object indentation
+ * - Skill content appending with separator
+ * - Special values handling
+ */
+
+import { describe, it, expect } from 'vitest';
+import { createMdPromptRenderer } from './MdPromptRenderer';
+
+type TestData = Record<string, unknown>;
+
+describe('MdPromptRenderer', () => {
+ const renderer = createMdPromptRenderer();
+
+ it('should have md format identifier', () => {
+ expect(renderer.format).toBe('md');
+ });
+
+ it('should render simple object with H1 title and H3 headings', () => {
+ const data: TestData = { name: 'test', value: '42', content: 'Some content' };
+ const result = renderer.render(data, 'Example');
+
+ expect(result).toContain('# Example');
+ expect(result).toContain('### name');
+ expect(result).toContain('### value');
+ expect(result).toContain('- **name**: *test*');
+ expect(result).toContain('- **value**: *42*');
+ });
+
+ it('should use H4 for nested objects', () => {
+ const data: TestData = {
+ metadata: {
+ version: '1.0',
+ },
+ content: '',
+ };
+ const result = renderer.render(data, 'Test');
+
+ expect(result).toContain('### metadata');
+ expect(result).toContain('#### version');
+ expect(result).toContain('- **version**: *1.0*');
+ });
+
+ it('should render arrays as nested bullets', () => {
+ const data: TestData = {
+ tags: ['important', 'urgent', 'review'],
+ content: '',
+ };
+ const result = renderer.render(data, 'Tags');
+
+ expect(result).toContain('### tags');
+ expect(result).toContain('- *important*');
+ expect(result).toContain('- *urgent*');
+ expect(result).toContain('- *review*');
+ });
+
+ it('should HTML-escape special characters in values', () => {
+ const data: TestData = {
+ html: '<script>',
+ ampersand: 'A & B',
+ quotes: 'He said "hello"',
+ content: '',
+ };
+ const result = renderer.render(data, 'Escaped');
+
+ expect(result).toContain('&lt;script&gt;');
+ expect(result).toContain('&amp;');
+ expect(result).toContain('&quot;');
+ });
+
+ it('should skip null and undefined values', () => {
+ const data: TestData = {
+ name: 'test',
+ value: null,
+ optional: undefined,
+ content: '',
+ };
+ const result = renderer.render(data, 'Test');
+
+ expect(result).toContain('- **name**: *test*');
+ expect(result).not.toContain('### value');
+ expect(result).not.toContain('### optional');
+ });
+
+ it('should append skill content after separator', () => {
+ const data: TestData = {
+ name: 'git-commits',
+ description: 'Guidelines for git commits',
+ content: 'Use imperative mood...\nWrite clear messages...',
+ };
+ const result = renderer.render(data, 'Skill');
+
+ expect(result).toContain('---');
+ expect(result).toContain('### Content');
+ expect(result).toContain('Use imperative mood');
+ expect(result).toContain('Write clear messages');
+ });
+
+ it('should handle mixed nested and array data', () => {
+ const data: TestData = {
+ user: {
+ name: 'Alice',
+ roles: ['admin', 'reviewer'],
+ },
+ content: '',
+ };
+ const result = renderer.render(data, 'User');
+
+ expect(result).toContain('### user');
+ expect(result).toContain('#### name');
+ expect(result).toContain('#### roles');
+ expect(result).toContain('- *admin*');
+ expect(result).toContain('- *reviewer*');
+ });
+
+ it('should handle deeply nested objects', () => {
+ const data: TestData = {
+ level1: {
+ level2: {
+ level3: {
+ value: 'deep',
+ },
+ },
+ },
+ content: '',
+ };
+ const result = renderer.render(data, 'Nested');
+
+ expect(result).toContain('### level1');
+ expect(result).toContain('#### level2');
+ expect(result).toContain('- **value**: *deep*');
+ });
+});
diff --git a/src/lib/renderers/MdPromptRenderer.ts b/src/lib/renderers/MdPromptRenderer.ts
new file mode 100644
index 0000000..edc5dee
--- /dev/null
+++ b/src/lib/renderers/MdPromptRenderer.ts
@@ -0,0 +1,168 @@
+/**
+ * MdPromptRenderer - Format objects as human-readable Markdown
+ *
+ * WHY: Markdown provides human-readable formatting that works well for:
+ * - Models that benefit from visual structure and readability
+ * - Debugging and development (easier to read in logs)
+ * - Accessibility and presentation
+ *
+ * STRUCTURE:
+ * - Top-level keys → H3 headings (### key)
+ * - Nested objects → H4 headings (#### key) with increased indentation
+ * - Leaf nodes → nested bullet list items with emphasis: - **key**: *value*
+ * - Arrays → nested bullets under parent key
+ * - Special characters → HTML-escaped (<, >, &)
+ * - Skill content → appended after --- separator with ### Content heading
+ */
+
+import type { PromptRenderer } from '../PromptRenderer';
+
+export const createMdPromptRenderer = (): PromptRenderer => {
+ const format = 'md' as const;
+
+ /**
+ * Recursively render an object with proper heading levels and list nesting
+ *
+ * @param obj The object to render
+ * @param headingLevel The heading level for this object's keys (3 = H3, 4 = H4, etc)
+ * @param indentLevel Current indentation level for list items
+ */
+ const renderObject = (
+ obj: Record<string, unknown>,
+ headingLevel: number,
+ indentLevel: number = 0
+ ): string => {
+ const entries = Object.entries(obj);
+ let output = '';
+
+ for (const [key, value] of entries) {
+ if (value === null || value === undefined) {
+ // Skip null/undefined values
+ continue;
+ }
+
+ // Add heading for this key
+ const heading = '#'.repeat(headingLevel);
+ output += `${heading} ${key}\n\n`;
+
+ if (typeof value === 'object' && !Array.isArray(value)) {
+ // Nested object - recurse with increased heading level
+ output += renderObject(
+ value as Record<string, unknown>,
+ Math.min(headingLevel + 1, 6),
+ indentLevel
+ );
+ } else if (Array.isArray(value)) {
+ // Array - render as nested list items
+ output += renderArray(value, indentLevel);
+ } else {
+ // Leaf node - render as list item
+ const indent = ' '.repeat(indentLevel);
+ const escapedValue = htmlEscape(String(value));
+ output += `${indent}- **${key}**: *${escapedValue}*`;
+ }
+
+ output += '\n'; // Add spacing between items
+ }
+
+ return output;
+ };
+
+ /**
+ * Render an array as nested list items
+ *
+ * @param arr The array to render
+ * @param indentLevel Current indentation level
+ */
+ const renderArray = (arr: unknown[], indentLevel: number): string => {
+ const indent = ' '.repeat(indentLevel);
+ let output = '';
+
+ for (const item of arr) {
+ if (item === null || item === undefined) {
+ continue;
+ }
+
+ if (typeof item === 'object' && !Array.isArray(item)) {
+ // Nested object in array
+ const nestedObj = item as Record<string, unknown>;
+ for (const [key, value] of Object.entries(nestedObj)) {
+ if (value === null || value === undefined) {
+ continue;
+ }
+
+ if (typeof value === 'object') {
+ if (Array.isArray(value)) {
+ // Nested array inside object in array
+ output += `${indent}- **${key}**:\n`;
+ output += renderArray(value, indentLevel + 1);
+ } else {
+ // Nested object inside object in array
+ output += `${indent}- **${key}**\n`;
+ output += renderObject(value as Record<string, unknown>, 4, indentLevel + 1);
+ }
+ } else {
+ const escapedValue = htmlEscape(String(value));
+ output += `${indent}- **${key}**: *${escapedValue}*\n`;
+ }
+ }
+ } else if (Array.isArray(item)) {
+ // Nested array - recurse
+ output += renderArray(item, indentLevel + 1);
+ } else {
+ // Simple value
+ const escapedValue = htmlEscape(String(item));
+ output += `${indent}- *${escapedValue}*\n`;
+ }
+ }
+
+ return output;
+ };
+
+ /**
+ * HTML-escape special characters in values
+ * Prevents XML/HTML injection and ensures proper rendering
+ *
+ * @param value The value to escape
+ * @returns The escaped value
+ */
+ const htmlEscape = (value: string): string => {
+ return value
+ .replace(/&/g, '&amp;')
+ .replace(/</g, '&lt;')
+ .replace(/>/g, '&gt;')
+ .replace(/"/g, '&quot;')
+ .replace(/'/g, '&#39;');
+ };
+
+ const render = <T extends Record<string, unknown>>(
+ data: T,
+ rootElement: string = 'Prompt'
+ ): string => {
+ // Separate out the 'content' field if it exists (for skills)
+ const { content, ...restData } = data;
+
+ // Render the metadata section
+ return `# ${rootElement}
+
+${renderObject(restData, 3)}
+
+${
+ (content &&
+ `---
+
+### Content
+
+${content}
+
+`) ||
+ ''
+}
+ `;
+ };
+
+ return {
+ format,
+ render,
+ };
+};
diff --git a/src/lib/renderers/XmlPromptRenderer.test.ts b/src/lib/renderers/XmlPromptRenderer.test.ts
new file mode 100644
index 0000000..a80018d
--- /dev/null
+++ b/src/lib/renderers/XmlPromptRenderer.test.ts
@@ -0,0 +1,112 @@
+/**
+ * XmlPromptRenderer Tests
+ *
+ * Test coverage:
+ * - XML output with proper formatting
+ * - Root element handling
+ * - Special character escaping
+ * - Nested structure rendering
+ * - Delegation to jsonToXml
+ */
+
+import { describe, it, expect } from 'vitest';
+import { createXmlPromptRenderer } from './XmlPromptRenderer';
+
+describe('XmlPromptRenderer', () => {
+ const renderer = createXmlPromptRenderer();
+
+ it('should have xml format identifier', () => {
+ expect(renderer.format).toBe('xml');
+ });
+
+ it('should render simple object as XML with default root', () => {
+ const data = { name: 'test', value: '42' };
+ const result = renderer.render(data);
+
+ expect(result).toContain('<root>');
+ expect(result).toContain('</root>');
+ expect(result).toContain('test');
+ expect(result).toContain('42');
+ });
+
+ it('should use custom root element when provided', () => {
+ const data = { key: 'value' };
+ const result = renderer.render(data, 'Skill');
+
+ expect(result).toContain('<Skill>');
+ expect(result).toContain('</Skill>');
+ expect(result).not.toContain('<root>');
+ });
+
+ it('should escape special XML characters', () => {
+ const data = {
+ text: 'Content with & "quotes"',
+ };
+ const result = renderer.render(data);
+
+ expect(result).toContain('&lt;');
+ expect(result).toContain('&gt;');
+ expect(result).toContain('&amp;');
+ });
+
+ it('should render nested objects', () => {
+ const data = {
+ skill: {
+ name: 'test-skill',
+ description: 'A test skill',
+ },
+ };
+ const result = renderer.render(data, 'Skill');
+
+ expect(result).toContain('<skill>');
+ expect(result).toContain('test-skill');
+ expect(result).toContain('A test skill');
+ });
+
+ it('should render arrays with numeric indices', () => {
+ const data = {
+ items: ['apple', 'banana'],
+ };
+ const result = renderer.render(data);
+
+ // Arrays are rendered with indexed elements
+ expect(result).toContain('<items>');
+ expect(result).toContain('</items>');
+ // jsonToXml iterates through string characters
+ expect(result).toContain('<0>');
+ expect(result).toContain('<1>');
+ expect(result).toContain('</0>');
+ expect(result).toContain('</1>');
+ });
+
+ it('should output valid XML structure', () => {
+ const data = {
+ metadata: {
+ version: '1.0',
+ },
+ content: 'test',
+ };
+ const result = renderer.render(data);
+
+ // Should have opening and closing tags
+ expect(result).toMatch(/<root>[\s\S]*<\/root>/);
+ });
+
+ it('should handle deep nesting', () => {
+ const data = {
+ level1: {
+ level2: {
+ level3: {
+ value: 'deep',
+ },
+ },
+ },
+ };
+ const result = renderer.render(data);
+
+ expect(result).toContain('<level1>');
+ expect(result).toContain('<level2>');
+ expect(result).toContain('<level3>');
+ expect(result).toContain('deep');
+ });
+});
diff --git a/src/lib/renderers/XmlPromptRenderer.ts b/src/lib/renderers/XmlPromptRenderer.ts
new file mode 100644
index 0000000..c30170a
--- /dev/null
+++ b/src/lib/renderers/XmlPromptRenderer.ts
@@ -0,0 +1,23 @@
+/**
+ * XmlPromptRenderer - Format objects as XML (current default)
+ *
+ * WHY: Claude models are trained extensively on XML and prefer structured
+ * XML injection for skill metadata and search results. This maintains the
+ * current behavior as the default and recommended format for Claude models.
+ */
+
+import type { PromptRenderer } from '../PromptRenderer';
+import { jsonToXml } from '../xml';
+
+export const createXmlPromptRenderer = (): PromptRenderer => {
+ const format = 'xml' as const;
+
+ const render = (data: object, rootElement: string = 'root'): string => {
+ return jsonToXml(data, rootElement);
+ };
+
+ return {
+ format,
+ render,
+ };
+};
diff --git a/src/mocks.skillfs.ts b/src/mocks.skillfs.ts
index 627086f..ccbd12e 100644
--- a/src/mocks.skillfs.ts
+++ b/src/mocks.skillfs.ts
@@ -1,3 +1,4 @@
+/* eslint-disable no-console */
import { mock } from 'bun:test';
import { Volume } from 'memfs';
import path from 'node:path';
diff --git a/src/mocks.ts b/src/mocks.ts
index 87dd8fb..4883bc4 100644
--- a/src/mocks.ts
+++ b/src/mocks.ts
@@ -98,5 +98,7 @@ function createMockConfig() {
return {
debug: false,
basePaths: ['/mock/path/to/skills'],
+ promptRenderer: 'xml' as const,
+ modelRenderers: {},
};
}
diff --git a/src/services/MessageModelIdAccountant.ts b/src/services/MessageModelIdAccountant.ts
new file mode 100644
index 0000000..e343e74
--- /dev/null
+++ b/src/services/MessageModelIdAccountant.ts
@@ -0,0 +1,61 @@
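+/**
+ * MessageModelIdAccountant - Track which model produced each message
+ *
+ * WHY: Tool executions only receive a messageID and sessionID, but renderer
+ * selection needs the active model's provider and model IDs. The chat.message
+ * hook does see that information, so this service records it per session and
+ * message, and tools look it up later via getModelInfo().
+ *
+ * Entries are dropped on message.removed / session.deleted events so the map
+ * does not grow without bound across long-running sessions.
+ */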
+function createMessageModelIdAccountant() {
+ const modelUsage = new Map<
+ string, // sessionID
+ Record<
+ string, // messageID
+ {
+ modelID: string;
+ providerID: string;
+ }
+ >
+ >();
+
+ const track = (info: {
+ sessionID: string;
+ messageID: string;
+ modelID: string;
+ providerID: string;
+ }) => {
+ if (!modelUsage.has(info.sessionID)) {
+ modelUsage.set(info.sessionID, {});
+ }
+ const sessionMap = modelUsage.get(info.sessionID)!;
+ sessionMap[info.messageID] = {
+ modelID: info.modelID,
+ providerID: info.providerID,
+ };
+ };
+
+ const untrackMessage = (args: { messageID: string; sessionID: string }) => {
+ const sessionMap = modelUsage.get(args.sessionID);
+ if (sessionMap && sessionMap[args.messageID]) {
+ delete sessionMap[args.messageID];
+ if (Object.keys(sessionMap).length === 0) {
+ modelUsage.delete(args.sessionID);
+ }
+ }
+ };
+
+ const untrackSession = (sessionID: string) => {
+ modelUsage.delete(sessionID);
+ };
+
+ const getModelInfo = (args: { messageID: string; sessionID: string }) => {
+ const sessionMap = modelUsage.get(args.sessionID);
+ return sessionMap ? sessionMap[args.messageID] : undefined;
+ };
+
+ const reset = () => {
+ modelUsage.clear();
+ };
+
+ return {
+ reset,
+ track,
+ untrackMessage,
+ untrackSession,
+ getModelInfo,
+ };
+}
+
+export { createMessageModelIdAccountant };
diff --git a/src/services/SkillResourceResolver.test.ts b/src/services/SkillResourceResolver.test.ts
index c75338d..ce274fa 100644
--- a/src/services/SkillResourceResolver.test.ts
+++ b/src/services/SkillResourceResolver.test.ts
@@ -10,7 +10,12 @@ import { createSkillRegistry } from './SkillRegistry';
describe('SkillResourceResolver', () => {
async function createMockResolver() {
- const config = { basePaths: ['/skills', '/place/that/doesnt/exist'], debug: false };
+ const config = {
+ basePaths: ['/skills', '/place/that/doesnt/exist'],
+ debug: false,
+ promptRenderer: 'xml' as const,
+ modelRenderers: {},
+ };
const registry = await createSkillRegistry(config, console);
await registry.initialise();
return createSkillResourceResolver(registry);
diff --git a/src/services/logger.ts b/src/services/logger.ts
index 531579e..36b2a6e 100644
--- a/src/services/logger.ts
+++ b/src/services/logger.ts
@@ -1,3 +1,4 @@
+/* eslint-disable no-console */
import { LogType, PluginConfig, PluginLogger } from '../types';
const namespace = '[OpencodeSkillful]';
diff --git a/src/types.ts b/src/types.ts
index 1641973..9cbcc04 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -93,6 +93,8 @@ export type SkillRank = {
export type PluginConfig = {
debug: boolean;
basePaths: string[];
+ promptRenderer: 'json' | 'xml' | 'md';
+ modelRenderers?: Record<string, 'json' | 'xml' | 'md'>;
};
export type LogType = 'log' | 'debug' | 'error' | 'warn';