From 37fc419eece387d2b74999af69a561ed66e2dbb7 Mon Sep 17 00:00:00 2001 From: Shinsuke Kagawa Date: Fri, 1 May 2026 06:02:05 +0900 Subject: [PATCH 1/3] chore: add Node tooling for plugin sync workflow Introduce package.json (Node >=22, pnpm), a sync script that mirrors the canonical agents/skills directories into per-plugin subdirectories, and a lefthook pre-commit job that runs sync + claude plugin validate. A sync-check GitHub Actions workflow guards against drift in PRs. This is preparation for the per-subdirectory plugin layout switch in the following commit. Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/workflows/sync-check.yml | 22 ++++ .gitignore | 3 + .nvmrc | 1 + lefthook.yml | 25 ++++ package.json | 23 ++++ pnpm-lock.yaml | 114 +++++++++++++++++ scripts/sync-plugins.mjs | 204 +++++++++++++++++++++++++++++++ 7 files changed, 392 insertions(+) create mode 100644 .github/workflows/sync-check.yml create mode 100644 .nvmrc create mode 100644 lefthook.yml create mode 100644 package.json create mode 100644 pnpm-lock.yaml create mode 100644 scripts/sync-plugins.mjs diff --git a/.github/workflows/sync-check.yml b/.github/workflows/sync-check.yml new file mode 100644 index 0000000..47e4ccb --- /dev/null +++ b/.github/workflows/sync-check.yml @@ -0,0 +1,22 @@ +name: sync-check + +on: + push: + branches: [main] + pull_request: + +permissions: + contents: read + +jobs: + sync-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v5.0.0 + - uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0 + with: + node-version: 22 + cache: pnpm + - run: pnpm install --frozen-lockfile + - run: pnpm sync:check diff --git a/.gitignore b/.gitignore index 6702622..9fe17f7 100644 --- a/.gitignore +++ b/.gitignore @@ -32,6 +32,9 @@ temp/ .env.local .env.*.local +# Node +node_modules/ + # Misc *.bak *.backup diff --git 
a/.nvmrc b/.nvmrc new file mode 100644 index 0000000..2bd5a0a --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +22 diff --git a/lefthook.yml b/lefthook.yml new file mode 100644 index 0000000..23fa2f0 --- /dev/null +++ b/lefthook.yml @@ -0,0 +1,25 @@ +pre-commit: + jobs: + - name: sync-plugins + glob: + - "agents/**" + - "skills/**" + - ".claude-plugin/marketplace.json" + - "dev-workflows/**" + - "dev-workflows-frontend/**" + - "dev-skills/**" + run: pnpm sync && git add dev-workflows dev-workflows-frontend dev-skills + + - name: validate-plugins + glob: + - "agents/**" + - "skills/**" + - ".claude-plugin/marketplace.json" + - "dev-workflows/**" + - "dev-workflows-frontend/**" + - "dev-skills/**" + run: | + claude plugin validate .claude-plugin/marketplace.json && \ + claude plugin validate dev-workflows && \ + claude plugin validate dev-workflows-frontend && \ + claude plugin validate dev-skills diff --git a/package.json b/package.json new file mode 100644 index 0000000..7a1ade8 --- /dev/null +++ b/package.json @@ -0,0 +1,23 @@ +{ + "name": "claude-code-workflows", + "version": "0.16.16", + "private": true, + "type": "module", + "engines": { + "node": ">=22" + }, + "packageManager": "pnpm@10.28.2", + "scripts": { + "prepare": "lefthook install", + "sync": "node scripts/sync-plugins.mjs", + "sync:check": "node scripts/sync-plugins.mjs --check" + }, + "devDependencies": { + "lefthook": "^2.1.6" + }, + "pnpm": { + "onlyBuiltDependencies": [ + "lefthook" + ] + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100644 index 0000000..67f7692 --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,114 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + devDependencies: + lefthook: + specifier: ^2.1.6 + version: 2.1.6 + +packages: + + lefthook-darwin-arm64@2.1.6: + resolution: {integrity: sha512-hyB7eeiX78BS66f70byTJacDLC/xV1vgMv9n+idFUsrM7J3Udd/ag9Ag5NP3t0eN0EqQqAtrNnt35EH01lxnRQ==} + cpu: [arm64] + os: 
[darwin] + + lefthook-darwin-x64@2.1.6: + resolution: {integrity: sha512-5Ka6cFxiH83krt+OMRQtmS6zqoZR5SLXSudLjTbZA1c3ZqF0+dqkeb4XcB6plx6WR0GFizabuc6Bi3iXPIe1eQ==} + cpu: [x64] + os: [darwin] + + lefthook-freebsd-arm64@2.1.6: + resolution: {integrity: sha512-VswyOg5CVN3rMaOJ2HtnkltiMKgFHW/wouWxXsV8RxSa4tgWOKxM0EmSXi8qc2jX+LRga6B0uOY6toXS01zWxA==} + cpu: [arm64] + os: [freebsd] + + lefthook-freebsd-x64@2.1.6: + resolution: {integrity: sha512-vXsCUFYuVwrVWwcypB7Zt2Hf+5pl1V1la7ZfvGYZaTRURu0zF/XUnMF/nOz/PebGv0f4x/iOWXWwP7E42xRWsg==} + cpu: [x64] + os: [freebsd] + + lefthook-linux-arm64@2.1.6: + resolution: {integrity: sha512-WDJiQhJdZOvKORZd+kF/ms2l6NSsXzdA9ahflyr65V90AC4jES223W8VtEMbGPUtHuGWMEZ/v/XvwlWv0Ioz9g==} + cpu: [arm64] + os: [linux] + + lefthook-linux-x64@2.1.6: + resolution: {integrity: sha512-C18nCd7nTX1AVL4TcvwMmLAO1VI1OuGluIOTjiPkBQ746Ls1HhL5rl//jMPACmT28YmxIQJ2ZcLPNmhvEVBZvw==} + cpu: [x64] + os: [linux] + + lefthook-openbsd-arm64@2.1.6: + resolution: {integrity: sha512-mZOMxM8HiPxVFXDO3PtCUbH4GB8rkveXhsgXF27oAZTYVzQ3gO9vT6r/pxit6msqRXz3fvcwimLVJgb8eRsa8A==} + cpu: [arm64] + os: [openbsd] + + lefthook-openbsd-x64@2.1.6: + resolution: {integrity: sha512-sG9ALLZSnnMOfXu+B7SmxFhJhuoAh4bqi5En5aaHJET48TqrLOcWWZuH+7ArFM6gr/U5KfSUvdmHFmY8WqCcIg==} + cpu: [x64] + os: [openbsd] + + lefthook-windows-arm64@2.1.6: + resolution: {integrity: sha512-lD8yFWY4Csuljd0Rqs7EQaySC0VvDf7V3rN1FhRMUISTRDHutebIom1Loc8ckQPvKYGC6mftT9k0GvipsS+Brw==} + cpu: [arm64] + os: [win32] + + lefthook-windows-x64@2.1.6: + resolution: {integrity: sha512-q4z2n3xucLscoWiyMwFViEj3N8MDSkPulMwcJYuCYFHoPhP1h+icqNu7QRLGYj6AnVrCQweiUJY3Tb2X+GbD/A==} + cpu: [x64] + os: [win32] + + lefthook@2.1.6: + resolution: {integrity: sha512-w9sBoR0mdN+kJc3SB85VzpiAAl451/rxdCRcZlwW71QLjkeH3EBQFgc4VMj5apePychYDHAlqEWTB8J8JK/j1Q==} + hasBin: true + +snapshots: + + lefthook-darwin-arm64@2.1.6: + optional: true + + lefthook-darwin-x64@2.1.6: + optional: true + + lefthook-freebsd-arm64@2.1.6: + optional: true + + 
lefthook-freebsd-x64@2.1.6: + optional: true + + lefthook-linux-arm64@2.1.6: + optional: true + + lefthook-linux-x64@2.1.6: + optional: true + + lefthook-openbsd-arm64@2.1.6: + optional: true + + lefthook-openbsd-x64@2.1.6: + optional: true + + lefthook-windows-arm64@2.1.6: + optional: true + + lefthook-windows-x64@2.1.6: + optional: true + + lefthook@2.1.6: + optionalDependencies: + lefthook-darwin-arm64: 2.1.6 + lefthook-darwin-x64: 2.1.6 + lefthook-freebsd-arm64: 2.1.6 + lefthook-freebsd-x64: 2.1.6 + lefthook-linux-arm64: 2.1.6 + lefthook-linux-x64: 2.1.6 + lefthook-openbsd-arm64: 2.1.6 + lefthook-openbsd-x64: 2.1.6 + lefthook-windows-arm64: 2.1.6 + lefthook-windows-x64: 2.1.6 diff --git a/scripts/sync-plugins.mjs b/scripts/sync-plugins.mjs new file mode 100644 index 0000000..7f8e29c --- /dev/null +++ b/scripts/sync-plugins.mjs @@ -0,0 +1,204 @@ +#!/usr/bin/env node +import { cp, lstat, mkdir, mkdtemp, readFile, readdir, rm, stat, writeFile } from 'node:fs/promises' +import { tmpdir } from 'node:os' +import { dirname, isAbsolute, join, relative, resolve, sep } from 'node:path' +import { fileURLToPath } from 'node:url' + +const ROOT = resolve(dirname(fileURLToPath(import.meta.url)), '..') +const MARKETPLACE_PATH = join(ROOT, '.claude-plugin', 'marketplace.json') +const CHECK = process.argv.slice(2).includes('--check') + +function isLocalSource(source) { + return typeof source === 'string' && source.startsWith('./') +} + +function isInside(parent, child) { + const rel = relative(parent, child) + return rel !== '' && !rel.startsWith('..') && !isAbsolute(rel) +} + +async function resolveSafeSource(declaredPath, kind) { + const allowedPrefix = `./${kind}/` + if (typeof declaredPath !== 'string' || !declaredPath.startsWith(allowedPrefix)) { + throw new Error(`unsafe ${kind} path (must start with ${allowedPrefix}): ${declaredPath}`) + } + const src = resolve(ROOT, declaredPath) + if (!isInside(ROOT, src)) { + throw new Error(`unsafe ${kind} path (escapes repo root): 
${declaredPath}`) + } + await assertNoSymlinks(src, declaredPath) + return src +} + +async function assertNoSymlinks(absPath, label) { + const st = await lstat(absPath) + if (st.isSymbolicLink()) { + throw new Error(`refusing to read symlinked source: ${label}`) + } + if (st.isDirectory()) { + const entries = await readdir(absPath, { withFileTypes: true }) + for (const e of entries) { + await assertNoSymlinks(join(absPath, e.name), `${label}/${e.name}`) + } + } +} + +function pluginManifest(entry) { + const out = { + name: entry.name, + description: entry.description, + version: entry.version, + } + if (entry.author) out.author = entry.author + if (entry.homepage) out.homepage = entry.homepage + if (entry.repository) out.repository = entry.repository + if (entry.license) out.license = entry.license + if (entry.keywords) out.keywords = entry.keywords + return out +} + +async function generatePlugin(entry, baseDir) { + const expectedSource = `./${entry.name}` + if (entry.source !== expectedSource) { + throw new Error( + `plugin "${entry.name}" must declare source: "${expectedSource}" (got: ${JSON.stringify(entry.source)}). ` + + 'The source must equal "./" + plugin name to avoid accidentally overwriting the canonical agents/ or skills/ directories.', + ) + } + const targetDir = resolve(baseDir, entry.source) + if (!isInside(baseDir, targetDir)) { + throw new Error(`unsafe source path: ${entry.source}`) + } + + await rm(targetDir, { recursive: true, force: true }) + await mkdir(join(targetDir, '.claude-plugin'), { recursive: true }) + await writeFile( + join(targetDir, '.claude-plugin', 'plugin.json'), + `${JSON.stringify(pluginManifest(entry), null, 2)}\n`, + ) + + for (const agentPath of entry.agents ?? 
[]) { + const src = await resolveSafeSource(agentPath, 'agents') + const fileName = agentPath.split('/').pop() + const dst = join(targetDir, 'agents', fileName) + await mkdir(dirname(dst), { recursive: true }) + await cp(src, dst, { verbatimSymlinks: true }) + } + + for (const skillPath of entry.skills ?? []) { + const src = await resolveSafeSource(skillPath, 'skills') + const skillName = skillPath.split('/').pop() + const dst = join(targetDir, 'skills', skillName) + await mkdir(dirname(dst), { recursive: true }) + await cp(src, dst, { recursive: true, verbatimSymlinks: true }) + } +} + +async function loadLocalPlugins() { + const marketplace = JSON.parse(await readFile(MARKETPLACE_PATH, 'utf8')) + return marketplace.plugins.filter((p) => isLocalSource(p.source)) +} + +async function syncAll(baseDir) { + const local = await loadLocalPlugins() + for (const entry of local) { + await generatePlugin(entry, baseDir) + } + return local +} + +async function pathExists(p) { + try { + await stat(p) + return true + } catch { + return false + } +} + +async function listFilesRecursive(root) { + const out = [] + async function walk(dir) { + const entries = await readdir(dir, { withFileTypes: true }) + for (const e of entries) { + const full = join(dir, e.name) + if (e.isDirectory()) { + await walk(full) + } else if (e.isFile()) { + out.push(relative(root, full).split(sep).join('/')) + } else { + out.push(`${relative(root, full).split(sep).join('/')} (non-regular: ${e.isSymbolicLink() ? 
'symlink' : 'other'})`) + } + } + } + await walk(root) + out.sort() + return out +} + +async function diffDirs(a, b) { + const [filesA, filesB] = await Promise.all([listFilesRecursive(a), listFilesRecursive(b)]) + const diffs = [] + + const setA = new Set(filesA) + const setB = new Set(filesB) + for (const f of filesA) { + if (!setB.has(f)) diffs.push(`only in real: ${f}`) + } + for (const f of filesB) { + if (!setA.has(f)) diffs.push(`only in expected: ${f}`) + } + + const common = filesA.filter((f) => setB.has(f)) + for (const f of common) { + const [bufA, bufB] = await Promise.all([readFile(join(a, f)), readFile(join(b, f))]) + if (!bufA.equals(bufB)) diffs.push(`content differs: ${f}`) + } + return diffs +} + +async function checkDrift() { + const tmp = await mkdtemp(join(tmpdir(), 'sync-plugins-')) + try { + const local = await syncAll(tmp) + let drift = false + for (const entry of local) { + const real = resolve(ROOT, entry.source) + const expected = resolve(tmp, entry.source) + if (!(await pathExists(real))) { + console.error(`[drift] missing subdirectory: ${entry.source}`) + drift = true + continue + } + const diffs = await diffDirs(real, expected) + if (diffs.length > 0) { + console.error(`[drift] ${entry.source}:`) + for (const d of diffs) console.error(` ${d}`) + drift = true + } + } + if (drift) { + console.error('\nPlugin subdirectories are out of sync. 
Run: pnpm sync') + process.exit(1) + } + console.log('All in-repo plugin subdirectories are in sync.') + } finally { + await rm(tmp, { recursive: true, force: true }) + } +} + +async function main() { + if (CHECK) { + await checkDrift() + return + } + const local = await syncAll(ROOT) + for (const entry of local) { + console.log(`synced ${entry.name} -> ${entry.source}`) + } +} + +main().catch((err) => { + console.error(err) + process.exit(1) +}) From b20ed11461c18ba8252a7d891ad3da726fe2d7ed Mon Sep 17 00:00:00 2001 From: Shinsuke Kagawa Date: Fri, 1 May 2026 06:02:29 +0900 Subject: [PATCH 2/3] fix: switch in-repo plugins to per-subdirectory layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each in-repo plugin now lives in its own subdirectory (./dev-workflows, ./dev-workflows-frontend, ./dev-skills) populated by scripts/sync-plugins.mjs from the canonical top-level agents/ and skills/ directories. The marketplace.json source field points at each subdirectory and strict mode is restored. This works around two upstream Claude Code regressions in the loader: * The plugin installer stopped copying symlinked content in v2.1.117 (anthropics/claude-code#53948, still open) — which made the previous flatten that uses source: "./" the only viable single-source layout. * The marketplace agents/skills filter arrays under source: "./" are silently ignored by the loader (anthropics/claude-code#13344, still open) — so dev-skills was loading every agent and recipe-* skill that existed at the repo root. Switching to per-subdirectory sources sidesteps both issues by giving the loader a directory that physically contains only the intended subset. The marketplace.json arrays are kept and used as the spec for the sync script, so when the upstream filter is fixed we can collapse back to a single-source layout without re-deriving the per-plugin curation. 
Verified with a clean install in an isolated HOME on macOS: * dev-workflows: 20 agents / 20 skills * dev-workflows-frontend: 20 agents / 18 skills * dev-skills: 0 agents / 9 skills (no leakage) Co-Authored-By: Claude Opus 4.7 (1M context) --- .claude-plugin/marketplace.json | 18 +- dev-skills/.claude-plugin/plugin.json | 22 + .../skills/ai-development-guide/SKILL.md | 330 ++++++++++++ dev-skills/skills/coding-principles/SKILL.md | 224 +++++++++ .../references/security-checks.md | 64 +++ .../skills/documentation-criteria/SKILL.md | 236 +++++++++ .../references/adr-template.md | 68 +++ .../references/design-template.md | 388 ++++++++++++++ .../references/plan-template.md | 192 +++++++ .../references/prd-template.md | 142 ++++++ .../references/task-template.md | 54 ++ .../references/ui-spec-template.md | 199 ++++++++ dev-skills/skills/frontend-ai-guide/SKILL.md | 250 ++++++++++ .../skills/implementation-approach/SKILL.md | 144 ++++++ .../skills/integration-e2e-testing/SKILL.md | 154 ++++++ .../references/e2e-design.md | 86 ++++ dev-skills/skills/test-implement/SKILL.md | 30 ++ .../skills/test-implement/references/e2e.md | 252 ++++++++++ .../test-implement/references/frontend.md | 217 ++++++++ dev-skills/skills/testing-principles/SKILL.md | 472 ++++++++++++++++++ dev-skills/skills/typescript-rules/SKILL.md | 206 ++++++++ .../.claude-plugin/plugin.json | 24 + .../agents/acceptance-test-generator.md | 316 ++++++++++++ .../agents/code-reviewer.md | 266 ++++++++++ .../agents/code-verifier.md | 238 +++++++++ .../agents/codebase-analyzer.md | 233 +++++++++ dev-workflows-frontend/agents/design-sync.md | 313 ++++++++++++ .../agents/document-reviewer.md | 350 +++++++++++++ .../agents/integration-test-reviewer.md | 145 ++++++ dev-workflows-frontend/agents/investigator.md | 214 ++++++++ dev-workflows-frontend/agents/prd-creator.md | 242 +++++++++ .../agents/quality-fixer-frontend.md | 425 ++++++++++++++++ .../agents/requirement-analyzer.md | 135 +++++ 
dev-workflows-frontend/agents/rule-advisor.md | 166 ++++++ .../agents/security-reviewer.md | 143 ++++++ dev-workflows-frontend/agents/solver.md | 175 +++++++ .../agents/task-decomposer.md | 255 ++++++++++ .../agents/task-executor-frontend.md | 305 +++++++++++ .../agents/technical-designer-frontend.md | 431 ++++++++++++++++ .../agents/ui-spec-designer.md | 113 +++++ dev-workflows-frontend/agents/verifier.md | 216 ++++++++ dev-workflows-frontend/agents/work-planner.md | 256 ++++++++++ .../skills/ai-development-guide/SKILL.md | 330 ++++++++++++ .../skills/coding-principles/SKILL.md | 224 +++++++++ .../references/security-checks.md | 64 +++ .../skills/documentation-criteria/SKILL.md | 236 +++++++++ .../references/adr-template.md | 68 +++ .../references/design-template.md | 388 ++++++++++++++ .../references/plan-template.md | 192 +++++++ .../references/prd-template.md | 142 ++++++ .../references/task-template.md | 54 ++ .../references/ui-spec-template.md | 199 ++++++++ .../skills/frontend-ai-guide/SKILL.md | 250 ++++++++++ .../skills/implementation-approach/SKILL.md | 144 ++++++ .../skills/integration-e2e-testing/SKILL.md | 154 ++++++ .../references/e2e-design.md | 86 ++++ .../skills/recipe-diagnose/SKILL.md | 232 +++++++++ .../skills/recipe-front-build/SKILL.md | 137 +++++ .../skills/recipe-front-design/SKILL.md | 120 +++++ .../skills/recipe-front-plan/SKILL.md | 75 +++ .../skills/recipe-front-review/SKILL.md | 157 ++++++ .../skills/recipe-task/SKILL.md | 58 +++ .../skills/recipe-update-doc/SKILL.md | 214 ++++++++ .../subagents-orchestration-guide/SKILL.md | 419 ++++++++++++++++ .../references/monorepo-flow.md | 139 ++++++ .../skills/task-analyzer/SKILL.md | 128 +++++ .../references/skills-index.yaml | 216 ++++++++ .../skills/test-implement/SKILL.md | 30 ++ .../skills/test-implement/references/e2e.md | 252 ++++++++++ .../test-implement/references/frontend.md | 217 ++++++++ .../skills/testing-principles/SKILL.md | 472 ++++++++++++++++++ 
.../skills/typescript-rules/SKILL.md | 206 ++++++++ dev-workflows/.claude-plugin/plugin.json | 23 + .../agents/acceptance-test-generator.md | 316 ++++++++++++ dev-workflows/agents/code-reviewer.md | 266 ++++++++++ dev-workflows/agents/code-verifier.md | 238 +++++++++ dev-workflows/agents/codebase-analyzer.md | 233 +++++++++ dev-workflows/agents/design-sync.md | 313 ++++++++++++ dev-workflows/agents/document-reviewer.md | 350 +++++++++++++ .../agents/integration-test-reviewer.md | 145 ++++++ dev-workflows/agents/investigator.md | 214 ++++++++ dev-workflows/agents/prd-creator.md | 242 +++++++++ dev-workflows/agents/quality-fixer.md | 318 ++++++++++++ dev-workflows/agents/requirement-analyzer.md | 135 +++++ dev-workflows/agents/rule-advisor.md | 166 ++++++ dev-workflows/agents/scope-discoverer.md | 243 +++++++++ dev-workflows/agents/security-reviewer.md | 143 ++++++ dev-workflows/agents/solver.md | 175 +++++++ dev-workflows/agents/task-decomposer.md | 255 ++++++++++ dev-workflows/agents/task-executor.md | 350 +++++++++++++ dev-workflows/agents/technical-designer.md | 431 ++++++++++++++++ dev-workflows/agents/verifier.md | 216 ++++++++ dev-workflows/agents/work-planner.md | 256 ++++++++++ .../skills/ai-development-guide/SKILL.md | 330 ++++++++++++ .../skills/coding-principles/SKILL.md | 224 +++++++++ .../references/security-checks.md | 64 +++ .../skills/documentation-criteria/SKILL.md | 236 +++++++++ .../references/adr-template.md | 68 +++ .../references/design-template.md | 388 ++++++++++++++ .../references/plan-template.md | 192 +++++++ .../references/prd-template.md | 142 ++++++ .../references/task-template.md | 54 ++ .../references/ui-spec-template.md | 199 ++++++++ .../skills/implementation-approach/SKILL.md | 144 ++++++ .../skills/integration-e2e-testing/SKILL.md | 154 ++++++ .../references/e2e-design.md | 86 ++++ .../recipe-add-integration-tests/SKILL.md | 161 ++++++ dev-workflows/skills/recipe-build/SKILL.md | 137 +++++ 
dev-workflows/skills/recipe-design/SKILL.md | 73 +++ dev-workflows/skills/recipe-diagnose/SKILL.md | 232 +++++++++ .../skills/recipe-fullstack-build/SKILL.md | 154 ++++++ .../recipe-fullstack-implement/SKILL.md | 164 ++++++ .../skills/recipe-implement/SKILL.md | 143 ++++++ dev-workflows/skills/recipe-plan/SKILL.md | 71 +++ .../skills/recipe-reverse-engineer/SKILL.md | 409 +++++++++++++++ dev-workflows/skills/recipe-review/SKILL.md | 160 ++++++ dev-workflows/skills/recipe-task/SKILL.md | 58 +++ .../skills/recipe-update-doc/SKILL.md | 214 ++++++++ .../subagents-orchestration-guide/SKILL.md | 419 ++++++++++++++++ .../references/monorepo-flow.md | 139 ++++++ dev-workflows/skills/task-analyzer/SKILL.md | 128 +++++ .../references/skills-index.yaml | 216 ++++++++ .../skills/testing-principles/SKILL.md | 472 ++++++++++++++++++ 123 files changed, 24962 insertions(+), 9 deletions(-) create mode 100644 dev-skills/.claude-plugin/plugin.json create mode 100644 dev-skills/skills/ai-development-guide/SKILL.md create mode 100644 dev-skills/skills/coding-principles/SKILL.md create mode 100644 dev-skills/skills/coding-principles/references/security-checks.md create mode 100644 dev-skills/skills/documentation-criteria/SKILL.md create mode 100644 dev-skills/skills/documentation-criteria/references/adr-template.md create mode 100644 dev-skills/skills/documentation-criteria/references/design-template.md create mode 100644 dev-skills/skills/documentation-criteria/references/plan-template.md create mode 100644 dev-skills/skills/documentation-criteria/references/prd-template.md create mode 100644 dev-skills/skills/documentation-criteria/references/task-template.md create mode 100644 dev-skills/skills/documentation-criteria/references/ui-spec-template.md create mode 100644 dev-skills/skills/frontend-ai-guide/SKILL.md create mode 100644 dev-skills/skills/implementation-approach/SKILL.md create mode 100644 dev-skills/skills/integration-e2e-testing/SKILL.md create mode 100644 
dev-skills/skills/integration-e2e-testing/references/e2e-design.md create mode 100644 dev-skills/skills/test-implement/SKILL.md create mode 100644 dev-skills/skills/test-implement/references/e2e.md create mode 100644 dev-skills/skills/test-implement/references/frontend.md create mode 100644 dev-skills/skills/testing-principles/SKILL.md create mode 100644 dev-skills/skills/typescript-rules/SKILL.md create mode 100644 dev-workflows-frontend/.claude-plugin/plugin.json create mode 100644 dev-workflows-frontend/agents/acceptance-test-generator.md create mode 100644 dev-workflows-frontend/agents/code-reviewer.md create mode 100644 dev-workflows-frontend/agents/code-verifier.md create mode 100644 dev-workflows-frontend/agents/codebase-analyzer.md create mode 100644 dev-workflows-frontend/agents/design-sync.md create mode 100644 dev-workflows-frontend/agents/document-reviewer.md create mode 100644 dev-workflows-frontend/agents/integration-test-reviewer.md create mode 100644 dev-workflows-frontend/agents/investigator.md create mode 100644 dev-workflows-frontend/agents/prd-creator.md create mode 100644 dev-workflows-frontend/agents/quality-fixer-frontend.md create mode 100644 dev-workflows-frontend/agents/requirement-analyzer.md create mode 100644 dev-workflows-frontend/agents/rule-advisor.md create mode 100644 dev-workflows-frontend/agents/security-reviewer.md create mode 100644 dev-workflows-frontend/agents/solver.md create mode 100644 dev-workflows-frontend/agents/task-decomposer.md create mode 100644 dev-workflows-frontend/agents/task-executor-frontend.md create mode 100644 dev-workflows-frontend/agents/technical-designer-frontend.md create mode 100644 dev-workflows-frontend/agents/ui-spec-designer.md create mode 100644 dev-workflows-frontend/agents/verifier.md create mode 100644 dev-workflows-frontend/agents/work-planner.md create mode 100644 dev-workflows-frontend/skills/ai-development-guide/SKILL.md create mode 100644 
dev-workflows-frontend/skills/coding-principles/SKILL.md create mode 100644 dev-workflows-frontend/skills/coding-principles/references/security-checks.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/SKILL.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/adr-template.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/design-template.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/plan-template.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/prd-template.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/task-template.md create mode 100644 dev-workflows-frontend/skills/documentation-criteria/references/ui-spec-template.md create mode 100644 dev-workflows-frontend/skills/frontend-ai-guide/SKILL.md create mode 100644 dev-workflows-frontend/skills/implementation-approach/SKILL.md create mode 100644 dev-workflows-frontend/skills/integration-e2e-testing/SKILL.md create mode 100644 dev-workflows-frontend/skills/integration-e2e-testing/references/e2e-design.md create mode 100644 dev-workflows-frontend/skills/recipe-diagnose/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-front-build/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-front-design/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-front-plan/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-front-review/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-task/SKILL.md create mode 100644 dev-workflows-frontend/skills/recipe-update-doc/SKILL.md create mode 100644 dev-workflows-frontend/skills/subagents-orchestration-guide/SKILL.md create mode 100644 dev-workflows-frontend/skills/subagents-orchestration-guide/references/monorepo-flow.md create mode 100644 dev-workflows-frontend/skills/task-analyzer/SKILL.md create mode 100644 
dev-workflows-frontend/skills/task-analyzer/references/skills-index.yaml create mode 100644 dev-workflows-frontend/skills/test-implement/SKILL.md create mode 100644 dev-workflows-frontend/skills/test-implement/references/e2e.md create mode 100644 dev-workflows-frontend/skills/test-implement/references/frontend.md create mode 100644 dev-workflows-frontend/skills/testing-principles/SKILL.md create mode 100644 dev-workflows-frontend/skills/typescript-rules/SKILL.md create mode 100644 dev-workflows/.claude-plugin/plugin.json create mode 100644 dev-workflows/agents/acceptance-test-generator.md create mode 100644 dev-workflows/agents/code-reviewer.md create mode 100644 dev-workflows/agents/code-verifier.md create mode 100644 dev-workflows/agents/codebase-analyzer.md create mode 100644 dev-workflows/agents/design-sync.md create mode 100644 dev-workflows/agents/document-reviewer.md create mode 100644 dev-workflows/agents/integration-test-reviewer.md create mode 100644 dev-workflows/agents/investigator.md create mode 100644 dev-workflows/agents/prd-creator.md create mode 100644 dev-workflows/agents/quality-fixer.md create mode 100644 dev-workflows/agents/requirement-analyzer.md create mode 100644 dev-workflows/agents/rule-advisor.md create mode 100644 dev-workflows/agents/scope-discoverer.md create mode 100644 dev-workflows/agents/security-reviewer.md create mode 100644 dev-workflows/agents/solver.md create mode 100644 dev-workflows/agents/task-decomposer.md create mode 100644 dev-workflows/agents/task-executor.md create mode 100644 dev-workflows/agents/technical-designer.md create mode 100644 dev-workflows/agents/verifier.md create mode 100644 dev-workflows/agents/work-planner.md create mode 100644 dev-workflows/skills/ai-development-guide/SKILL.md create mode 100644 dev-workflows/skills/coding-principles/SKILL.md create mode 100644 dev-workflows/skills/coding-principles/references/security-checks.md create mode 100644 dev-workflows/skills/documentation-criteria/SKILL.md 
create mode 100644 dev-workflows/skills/documentation-criteria/references/adr-template.md create mode 100644 dev-workflows/skills/documentation-criteria/references/design-template.md create mode 100644 dev-workflows/skills/documentation-criteria/references/plan-template.md create mode 100644 dev-workflows/skills/documentation-criteria/references/prd-template.md create mode 100644 dev-workflows/skills/documentation-criteria/references/task-template.md create mode 100644 dev-workflows/skills/documentation-criteria/references/ui-spec-template.md create mode 100644 dev-workflows/skills/implementation-approach/SKILL.md create mode 100644 dev-workflows/skills/integration-e2e-testing/SKILL.md create mode 100644 dev-workflows/skills/integration-e2e-testing/references/e2e-design.md create mode 100644 dev-workflows/skills/recipe-add-integration-tests/SKILL.md create mode 100644 dev-workflows/skills/recipe-build/SKILL.md create mode 100644 dev-workflows/skills/recipe-design/SKILL.md create mode 100644 dev-workflows/skills/recipe-diagnose/SKILL.md create mode 100644 dev-workflows/skills/recipe-fullstack-build/SKILL.md create mode 100644 dev-workflows/skills/recipe-fullstack-implement/SKILL.md create mode 100644 dev-workflows/skills/recipe-implement/SKILL.md create mode 100644 dev-workflows/skills/recipe-plan/SKILL.md create mode 100644 dev-workflows/skills/recipe-reverse-engineer/SKILL.md create mode 100644 dev-workflows/skills/recipe-review/SKILL.md create mode 100644 dev-workflows/skills/recipe-task/SKILL.md create mode 100644 dev-workflows/skills/recipe-update-doc/SKILL.md create mode 100644 dev-workflows/skills/subagents-orchestration-guide/SKILL.md create mode 100644 dev-workflows/skills/subagents-orchestration-guide/references/monorepo-flow.md create mode 100644 dev-workflows/skills/task-analyzer/SKILL.md create mode 100644 dev-workflows/skills/task-analyzer/references/skills-index.yaml create mode 100644 dev-workflows/skills/testing-principles/SKILL.md diff --git 
a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 4877640..d7374dc 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -10,9 +10,9 @@ "plugins": [ { "name": "dev-workflows", - "source": "./", - "strict": false, - "version": "0.16.15", + "source": "./dev-workflows", + "strict": true, + "version": "0.16.16", "description": "Skills + Subagents for backend development - Use skills for coding guidance, or run recipe workflows for full orchestrated agentic coding with specialized agents", "author": { "name": "Shinsuke Kagawa", @@ -79,9 +79,9 @@ }, { "name": "dev-workflows-frontend", - "source": "./", - "strict": false, - "version": "0.16.15", + "source": "./dev-workflows-frontend", + "strict": true, + "version": "0.16.16", "description": "Skills + Subagents for React/TypeScript - Use skills for coding guidance, or run recipe workflows for full orchestrated agentic coding with specialized agents", "author": { "name": "Shinsuke Kagawa", @@ -147,9 +147,9 @@ }, { "name": "dev-skills", - "source": "./", - "strict": false, - "version": "0.16.15", + "source": "./dev-skills", + "strict": true, + "version": "0.16.16", "description": "Lightweight skills for users with existing workflows - coding best practices, testing principles, and design guidelines without recipe workflows or agents", "author": { "name": "Shinsuke Kagawa", diff --git a/dev-skills/.claude-plugin/plugin.json b/dev-skills/.claude-plugin/plugin.json new file mode 100644 index 0000000..930301a --- /dev/null +++ b/dev-skills/.claude-plugin/plugin.json @@ -0,0 +1,22 @@ +{ + "name": "dev-skills", + "description": "Lightweight skills for users with existing workflows - coding best practices, testing principles, and design guidelines without recipe workflows or agents", + "version": "0.16.16", + "author": { + "name": "Shinsuke Kagawa", + "url": "https://github.com/shinpr" + }, + "homepage": "https://github.com/shinpr/claude-code-workflows", + "repository": 
"https://github.com/shinpr/claude-code-workflows.git", + "license": "MIT", + "keywords": [ + "skills", + "best-practices", + "coding-standards", + "testing", + "tdd", + "design-docs", + "development", + "quality" + ] +} diff --git a/dev-skills/skills/ai-development-guide/SKILL.md b/dev-skills/skills/ai-development-guide/SKILL.md new file mode 100644 index 0000000..87ed253 --- /dev/null +++ b/dev-skills/skills/ai-development-guide/SKILL.md @@ -0,0 +1,330 @@ +--- +name: ai-development-guide +description: Technical decision criteria, anti-pattern detection, debugging techniques, and quality check workflow. Use when making technical decisions, detecting code smells, or performing quality assurance. +--- + +# AI Developer Guide - Technical Decision Criteria and Anti-pattern Collection + +## Technical Anti-patterns (Red Flag Patterns) + +Immediately stop and reconsider design when detecting the following patterns: + +### Code Quality Anti-patterns +1. **Writing similar code 3 or more times** - Violates Rule of Three +2. **Multiple responsibilities mixed in a single file** - Violates Single Responsibility Principle (SRP) +3. **Defining same content in multiple files** - Violates DRY principle +4. **Making changes without checking dependencies** - Potential for unexpected impacts +5. **Disabling code with comments** - Should use version control +6. **Error suppression** - Hiding problems creates technical debt +7. 
**Bypassing safety mechanisms (type systems, validation, contracts)** - Circumventing language's correctness guarantees + +### Design Anti-patterns +- **"Make it work for now" thinking** - Accumulation of technical debt +- **Patchwork implementation** - Unplanned additions to existing code +- **Optimistic implementation of uncertain technology** - Designing unknown elements assuming "it'll probably work" +- **Symptomatic fixes** - Surface-level fixes that don't solve root causes +- **Unplanned large-scale changes** - Lack of incremental approach + +## Fail-Fast Fallback Design Principles + +### Core Principle +Make all errors visible and traceable with full context. Prioritize primary code reliability over fallback implementations. Excessive fallback mechanisms mask errors and make debugging difficult. + +### Implementation Guidelines + +#### Default Approach +- **Propagate all errors explicitly** unless a Design Doc specifies a fallback +- **Make failures explicit**: Errors should be visible and traceable +- **Preserve error context**: Include original error information when re-throwing + +#### When Fallbacks Are Acceptable +- **Only with explicit Design Doc approval**: Document why fallback is necessary +- **Business-critical continuity**: When partial functionality is better than none +- **Graceful degradation paths**: Clearly defined degraded service levels + +#### Layer Responsibilities +- **Infrastructure Layer**: + - Always throw errors upward + - No business logic decisions + - Provide detailed error context + +- **Application Layer**: + - Make business-driven error handling decisions + - Implement fallbacks only when specified in requirements + - Log all fallback activations for monitoring + +### Error Masking Detection + +**Review Triggers** (require design review): +- Writing 3rd error handler in the same feature +- Multiple error handling blocks in single function/method +- Nested error handling structures +- Error handlers that return default values 
without logging + +**Before Implementing Any Fallback**: +1. Verify Design Doc explicitly defines this fallback +2. Document the business justification +3. Ensure error is logged with full context +4. Add monitoring/alerting for fallback activation + +### Implementation Pattern + +``` +AVOID: Silent fallback that hides errors + : + return DEFAULT_VALUE // Error hidden, debugging impossible + +PREFERRED: Explicit failure with context + : + log_error('Operation failed', context, error) + // Re-throw exception, return Error, return error tuple +``` + +**Adaptation**: Use language-appropriate error handling (exceptions, Result types, error tuples, etc.) + +## Rule of Three - Criteria for Code Duplication + +How to handle duplicate code based on Martin Fowler's "Refactoring": + +| Duplication Count | Action | Reason | +|-------------------|--------|--------| +| 1st time | Inline implementation | Cannot predict future changes | +| 2nd time | Consider future consolidation | Pattern beginning to emerge | +| 3rd time | Implement commonalization | Pattern established | + +### Criteria for Commonalization + +**Cases for Commonalization** +- Business logic duplication +- Complex processing algorithms +- Areas likely requiring bulk changes +- Validation rules + +**Cases to Avoid Commonalization** +- Accidental matches (coincidentally same code) +- Possibility of evolving in different directions +- Significant readability decrease from commonalization +- Simple helpers in test code + +### Implementation Example + +``` +// Immediate commonalization on 1st duplication +validateUserEmail(email) { /* ... */ } +validateContactEmail(email) { /* ... */ } + +// Commonalize on 3rd occurrence with context parameter +validateEmail(email, context) { /* ... 
*/ } +// context: 'user' | 'contact' | 'admin' +``` + +**Adaptation**: Use appropriate abstraction for your codebase (functions, classes, modules, configuration) + +## Common Failure Patterns and Avoidance Methods + +### Pattern 1: Error Fix Chain +**Symptom**: Fixing one error causes new errors +**Cause**: Surface-level fixes without understanding root cause +**Avoidance**: Identify root cause with 5 Whys before fixing + +### Pattern 2: Circumventing Correctness Guarantees +**Symptom**: Bypassing safety mechanisms (type systems, validation, contracts) +**Cause**: Impulse to avoid correctness errors +**Avoidance**: Use language-appropriate safety mechanisms (static checking, runtime validation, contracts, assertions) + +### Pattern 3: Implementation Without Sufficient Testing +**Symptom**: Many bugs after implementation +**Cause**: Ignoring Red-Green-Refactor process +**Avoidance**: Always start with failing tests + +### Pattern 4: Ignoring Technical Uncertainty +**Symptom**: Frequent unexpected errors when introducing new technology +**Cause**: Assuming "it should work according to official documentation" without prior investigation +**Avoidance**: +- Record certainty evaluation at the beginning of task files + ``` + Certainty: low (Reason: no working examples found for this integration) + Exploratory implementation: true + Fallback: use established alternative approach + ``` +- For low certainty cases, create minimal verification code first + +### Pattern 5: Insufficient Existing Code Investigation +**Symptom**: Duplicate implementations, architecture inconsistency, integration failures, adopting outdated patterns +**Cause**: Insufficient understanding of existing code before implementation; referencing only nearby files without verifying representativeness +**Avoidance Methods**: +- Before implementation, always search for similar functionality (using domain, responsibility, configuration patterns as keywords) +- Similar functionality found → Use that 
implementation (do not create new implementation) +- Similar functionality is technical debt → Create ADR improvement proposal before implementation +- No similar functionality exists → Implement new functionality following existing design philosophy +- Record all decisions and rationale in "Existing Codebase Analysis" section of Design Doc +- **Reference representativeness check**: When adopting a pattern or dependency from nearby code, verify it is representative across the repository before adopting — nearby files alone are an insufficient basis + +## Debugging Techniques + +### 1. Error Analysis Procedure +1. Read error message (first line) accurately +2. Focus on first and last of stack trace +3. Identify first line where your code appears + +### 2. 5 Whys - Root Cause Analysis +``` +Example: +Symptom: Build error +Why1: Contract definitions don't match → Why2: Interface was updated +Why3: Dependency change → Why4: Package update impact +Why5: Major version upgrade with breaking changes +Root cause: Inappropriate version specification in dependency manifest +``` + +### 3. Minimal Reproduction Code +To isolate problems, attempt reproduction with minimal code: +- Remove unrelated parts +- Replace external dependencies with mocks +- Create minimal configuration that reproduces problem + +### 4. 
Debug Log Output +``` +Pattern: Structured logging with context +{ + context: 'operation-name', + input: { relevant, input, data }, + state: currentState, + timestamp: current_time_ISO8601 +} + +Key elements: +- Operation context (what is being executed) +- Input data (what was received) +- Current state (relevant state variables) +- Timestamp (for correlation) +``` + +## Quality Assurance Mechanism Awareness + +Before executing quality checks, identify what quality mechanisms exist for the change area: +- Primary detection: inspect the change area's file types, project manifest, and configuration to identify applicable quality tools + - Check CI pipeline definitions for checks that cover the affected paths + - Check for domain-specific linter or validator configurations (e.g., schema validators, API spec validators, configuration file linters) + - Check for domain-specific constraints in project configuration (naming rules, length limits, format requirements) +- Supplementary hint: IF task file specifies Quality Assurance Mechanisms → use them as additional hints for which domain-specific checks to look for +- Include discovered domain-specific checks alongside standard quality phases below + +## Quality Check Workflow + +Universal quality assurance phases applicable to all languages: + +### Phase 1: Static Analysis +1. **Code Style Checking**: Verify adherence to style guidelines +2. **Code Formatting**: Ensure consistent formatting +3. **Unused Code Detection**: Identify dead code and unused imports/variables +4. **Static Type Checking**: Verify type correctness (for statically typed languages) +5. **Static Analysis**: Detect potential bugs, security issues, code smells + +### Phase 2: Build Verification +1. **Compilation/Build**: Verify code builds successfully (for compiled languages) +2. **Dependency Resolution**: Ensure all dependencies are available and compatible +3. 
**Resource Validation**: Check configuration files, assets are valid + +### Phase 3: Testing +1. **Unit Tests**: Run all unit tests +2. **Integration Tests**: Run integration tests +3. **Test Coverage**: Measure and verify coverage meets standards +4. **E2E Tests**: Run end-to-end tests + +### Phase 4: Final Quality Gate +All checks must pass before proceeding: +- Zero static analysis errors +- Build succeeds +- All tests pass +- Coverage meets project-configured threshold + +### Quality Check Pattern (Language-Agnostic) +``` +Workflow: +1. Format check → 2. Lint/Style → 3. Static analysis → +4. Build/Compile → 5. Unit tests → 6. Coverage check → +7. Integration tests → 8. Final gate + +Auto-fix capabilities (when available): +- Format auto-fix +- Lint auto-fix +- Dependency/import organization +- Simple code smell corrections +``` + +## Situations Requiring Technical Decisions + +### Timing of Abstraction +- Extract patterns after writing concrete implementation 3 times +- Be conscious of YAGNI, implement only currently needed features +- Prioritize current simplicity over future extensibility + +### Performance vs Readability +- Prioritize readability unless profiling identifies a measurable bottleneck (e.g., response time exceeding SLA, memory exceeding allocation) +- Measure before optimizing +- Document reason with comments when optimizing + +### Granularity of Contracts and Interfaces +- Overly detailed contracts reduce maintainability +- Design interfaces where each method maps to a single domain operation and parameter types use domain vocabulary +- Use abstraction mechanisms to reduce duplication + +## Implementation Completeness Assurance + +### Impact Analysis: Mandatory 3-Stage Process + +Complete these stages sequentially before any implementation: + +**1. 
Discovery** - Identify all affected code: +- Implementation references (imports, calls, instantiations) +- Interface dependencies (contracts, types, data structures) +- Test coverage +- Configuration (build configs, env settings, feature flags) +- Documentation (comments, docs, diagrams) + +**2. Understanding** - Analyze each discovered location: +- Role and purpose in the system +- Dependency direction (consumer or provider) +- Data flow (origin → transformations → destination) +- Coupling strength + +**3. Identification** - Produce structured report: +``` +## Impact Analysis +### Direct Impact +- [Unit]: [Reason and modification needed] + +### Indirect Impact +- [System]: [Integration path → reason] + +### Data Flow +[Source] → [Transformation] → [Consumer] + +### Risk Assessment +- High: [Complex dependencies, fragile areas] +- Medium: [Moderate coupling, test gaps] +- Low: [Isolated, well-tested areas] + +### Implementation Order +1. [Start with lowest risk or deepest dependency] +2. [...] +``` + +**Critical**: Do not implement until all 3 stages are documented + +### Unused Code Deletion + +When unused code is detected: +- Will it be used in this work? Yes → Implement now | No → Delete now (Git preserves) +- Applies to: Code, tests, docs, configs, assets + +### Existing Code Modification + +``` +In use? No → Delete + Yes → Working? No → Delete + Reimplement + Yes → Fix/Extend +``` + +**Principle**: Prefer clean implementation over patching broken code \ No newline at end of file diff --git a/dev-skills/skills/coding-principles/SKILL.md b/dev-skills/skills/coding-principles/SKILL.md new file mode 100644 index 0000000..001ccdb --- /dev/null +++ b/dev-skills/skills/coding-principles/SKILL.md @@ -0,0 +1,224 @@ +--- +name: coding-principles +description: Language-agnostic coding principles for maintainability, readability, and quality. Use when implementing features, refactoring code, or reviewing code quality. 
+--- + +# Language-Agnostic Coding Principles + +## Core Philosophy + +1. **Maintainability over Speed**: Prioritize long-term code health over initial development velocity +2. **Simplicity First**: Choose the simplest solution that meets requirements (YAGNI principle) +3. **Explicit over Implicit**: Make intentions clear through code structure and naming +4. **Delete over Comment**: Remove unused code instead of commenting it out + +## Code Quality + +### Continuous Improvement +- Refactor related code within each change set — address style, naming, or structure issues in the files being modified +- Improve code structure incrementally +- Keep the codebase lean and focused +- Delete unused code immediately + +### Readability +- Use meaningful, descriptive names drawn from the problem domain +- Use full words in names; abbreviations are acceptable only when widely recognized in the domain +- Use descriptive names; single-letter names are acceptable only for loop counters or well-known conventions (i, j, x, y) +- Extract magic numbers and strings into named constants +- Keep code self-documenting where possible + +## Function Design + +### Parameter Management +- **Recommended**: 0-2 parameters per function +- **For 3+ parameters**: Use objects, structs, or dictionaries to group related parameters +- **Example** (conceptual): + ``` + // Instead of: createUser(name, email, age, city, country) + // Use: createUser(userData) + ``` + +### Single Responsibility +- Each function should do one thing well +- Keep functions small and focused (typically < 50 lines) +- Extract complex logic into separate, well-named functions +- Functions should have a single level of abstraction + +### Function Organization +- Pure functions when possible (no side effects) +- Separate data transformation from side effects +- Use early returns to reduce nesting +- Keep nesting to a maximum of 3 levels; use early returns or extracted functions to flatten deeper nesting + +## Error Handling + 
+### Error Management Principles +- **Always handle errors**: Log with context or propagate explicitly +- **Log appropriately**: Include context for debugging +- **Protect sensitive data**: Mask or exclude passwords, tokens, PII from logs +- **Fail fast**: Detect and report errors as early as possible + +### Error Propagation +- Use language-appropriate error handling mechanisms +- Propagate errors to appropriate handling levels +- Provide meaningful error messages +- Include error context when re-throwing + +## Dependency Management + +### Loose Coupling via Parameterized Dependencies +- Inject external dependencies as parameters (constructor injection for classes, function parameters for procedural/functional code) +- Depend on abstractions, not concrete implementations +- Minimize inter-module dependencies +- Facilitate testing through mockable dependencies + +## Reference Representativeness + +### Verifying References Before Adoption +When adopting patterns, APIs, or dependencies from existing code: +- **IF** referencing only 2-3 nearby files → **THEN** confirm the pattern is representative by checking usage across the repository before adopting +- **IF** multiple approaches coexist in the repository → **THEN** identify the majority pattern and make a deliberate choice — selecting whichever is nearest is insufficient +- **IF** adopting an external dependency (library, plugin, SDK) → **THEN** verify repository-wide usage distribution for the same dependency; if the appropriate version cannot be determined from repository state alone, escalate +- **IF** following an existing pattern → **THEN** state the reason for following it when an alternative exists (e.g., consistency with surrounding code, avoiding breaking changes, pending coordinated update) + +### Principle +Nearby code is a starting point for investigation, not a sufficient basis for adoption. 
Verify that what you reference is representative of the repository's conventions and current best practices before using it as a model. + +## Performance Considerations + +### Optimization Approach +- **Measure first**: Profile before optimizing +- **Focus on algorithms**: Algorithmic complexity > micro-optimizations +- **Use appropriate data structures**: Choose based on access patterns +- **Resource management**: Handle memory, connections, and files properly + +### When to Optimize +- After identifying actual bottlenecks through profiling +- When performance issues are measurable +- Optimize only after measurable bottlenecks are identified, not during initial development + +## Code Organization + +### Structural Principles +- **Group related functionality**: Keep related code together +- **Separate concerns**: Domain logic, data access, presentation +- **Consistent naming**: Follow project conventions +- **Module cohesion**: High cohesion within modules, low coupling between + +### File Organization +- One primary responsibility per file +- Logical grouping of related functions/classes +- Clear folder structure reflecting architecture +- Avoid "god files" (files > 500 lines) + +## Commenting Principles + +### When to Comment +- **Document "what"**: Describe what the code does +- **Explain "why"**: Clarify reasoning behind decisions +- **Note limitations**: Document known constraints or edge cases +- **API documentation**: Public interfaces need clear documentation + +### Comment Scope +- Comment the "what" and "why"; the code itself communicates the "how" +- Record historical context in version control commit messages, not in comments +- Delete commented-out code (retrieve from git history when needed) +- Write comments that add information beyond what the code states + +### Comment Quality +- Write comments that remain accurate regardless of future code changes; avoid references to dates, versions, or temporary state +- Update comments when changing code +- Use 
proper grammar and formatting +- Write for future maintainers + +## Refactoring Approach + +### Safe Refactoring +- **Small steps**: Make one change at a time +- **Maintain working state**: Keep tests passing +- **Verify behavior**: Run tests after each change +- **Incremental improvement**: Don't aim for perfection immediately + +### Refactoring Triggers +- Code duplication (DRY principle) +- Functions > 50 lines +- Complex conditional logic +- Unclear naming or structure + +## Testing Considerations + +### Testability +- Write testable code from the start +- Avoid hidden dependencies +- Keep side effects explicit +- Design for parameterized dependencies + +### Test-Driven Development +- Write tests before implementation when appropriate +- Keep tests simple and focused +- Test behavior, not implementation +- Maintain test quality equal to production code + +## Security Principles + +### Secure Defaults +- Store credentials and secrets through environment variables or dedicated secret managers +- Use parameterized queries (prepared statements) for all database access +- Use established cryptographic libraries provided by the language or framework +- Generate security-critical values (tokens, IDs, nonces) with cryptographically secure random generators +- Encrypt sensitive data at rest and in transit using standard protocols + +### Input and Output Boundaries +- Validate all external input at system entry points for expected format, type, and length +- Encode output appropriately for its rendering context (HTML, SQL, shell, URL) +- Return only information necessary for the caller in error responses; log detailed diagnostics server-side + +### Access Control +- Apply authentication to all entry points that handle user data or trigger state changes +- Verify authorization for each resource access, not only at the entry point +- Grant only the permissions required for the operation (files, database connections, API scopes) + +### Knowledge Cutoff Supplement (2026-03) 
+- OWASP Top 10:2025 shifted from symptoms to root causes; added "Software Supply Chain Failures" (A03) and "Mishandling of Exceptional Conditions" (A10) +- Recent research indicates AI-generated code shows elevated rates of access control gaps — treat authentication and authorization as high-priority review targets +- OpenSSF published "Security-Focused Guide for AI Code Assistant Instructions" — recommends language-specific, actionable constraints over generic advice +- For detailed detection patterns, see `references/security-checks.md` + +## Documentation + +### Code Documentation +- Document public APIs and interfaces +- Include usage examples for complex functionality +- Maintain README files for modules +- Update documentation in the same commit that changes the corresponding behavior + +### Architecture Documentation +- Document high-level design decisions +- Explain integration points +- Clarify data flows and boundaries +- Record trade-offs and alternatives considered + +## Version Control Practices + +### Commit Practices +- Make atomic, focused commits +- Write clear, descriptive commit messages +- Commit working code (passes tests) +- Commit only production-ready code; store secrets in environment variables or secret managers + +### Code Review Readiness +- Self-review before requesting review +- Keep changes focused and reviewable +- Provide context in pull request descriptions +- Respond to feedback constructively + +## Language-Specific Adaptations + +While these principles are language-agnostic, adapt them to your specific programming language: + +- **Static typing**: Use strong types when available +- **Dynamic typing**: Add runtime validation +- **OOP languages**: Apply SOLID principles +- **Functional languages**: Prefer pure functions and immutability +- **Concurrency**: Follow language-specific patterns for thread safety + diff --git a/dev-skills/skills/coding-principles/references/security-checks.md 
b/dev-skills/skills/coding-principles/references/security-checks.md new file mode 100644 index 0000000..66b71aa --- /dev/null +++ b/dev-skills/skills/coding-principles/references/security-checks.md @@ -0,0 +1,64 @@ +# Security Check Patterns + +Last reviewed: 2026-03-21 + +## Stable Patterns + +These patterns have low false-positive rates and are detectable through grep or static analysis. + +### Hardcoded Secrets +- Credentials, API keys, or tokens assigned as string literals in source code +- Connection strings containing embedded passwords +- Private keys or certificates stored in source files +- Detection approach: search for high-entropy strings near assignment operators, common key names (`password`, `secret`, `api_key`, `token`, `private_key`), and platform-specific token formats + +### SQL String Concatenation +- SQL statements constructed through string concatenation or interpolation with variables +- Detection approach: search for SQL keywords (`SELECT`, `INSERT`, `UPDATE`, `DELETE`) combined with string concatenation operators or string interpolation containing variable references + +### Dynamic Code Execution +- Use of dynamic code execution functions (e.g., `eval`, `exec`) with non-static input +- Dynamic module loading with variable paths +- Detection approach: search for dynamic code execution or module loading calls where the argument is not a static literal + +### Insecure Deserialization +- Deserialization of untrusted input using unsafe loaders or formats that allow arbitrary object construction (e.g., native serialization, YAML without safe loader) +- Parsed data passed directly into dynamic code execution +- Detection approach: search for deserialization calls that accept external input without safe loader or type-restricted configuration + +### Path Traversal +- File system paths constructed from user-supplied input without sanitization +- Patterns where request parameters flow into file read/write operations +- Detection approach: search for 
file operations where path arguments include request parameters, query strings, or user input variables + +### CORS Wildcard +- `Access-Control-Allow-Origin` set to `*` in production configuration +- CORS middleware configured with wildcard origin +- Detection approach: search for CORS configuration with wildcard values + +### Non-TLS URLs +- HTTP (non-TLS) URLs embedded in source code for production endpoints (outside configuration files, tests, and documentation) +- Detection approach: search for `http://` patterns in source files, excluding localhost, configuration files, tests, and documentation + +## Trend-Sensitive Patterns + +Updated: 2026-03-21 +Sources: OWASP Top 10:2025, DryRun Agentic Coding Security Report (2026-03) + +### Access Control Gaps in AI-Generated Code +- Endpoints or route handlers defined without authentication middleware +- Resource access operations (read, update, delete) without authorization verification +- Administrative or destructive operations accessible without elevated permissions +- AI-generated code frequently omits authentication middleware and authorization checks — flag every route handler and resource access operation for explicit verification during review +- Detection approach: search for route/endpoint handler definitions that lack authentication middleware, and resource operations (read, update, delete) without authorization checks in the call chain + +### Mishandling of Exceptional Conditions (OWASP A10:2025) +- Error handlers that expose internal system details (stack traces, database errors, file paths) in responses +- Error handlers that grant access, skip authentication, or bypass authorization when an exception occurs (fail-open behavior) +- Missing error handling on security-critical operations (authentication, authorization, cryptographic operations) +- Detection approach: search for catch/error handler blocks that return stack traces, database error messages, or file paths in responses; search for catch blocks 
that call next() or return success without re-validating security state + +### Software Supply Chain Patterns (OWASP A03:2025) +- Dependencies imported without version pinning +- Use of deprecated or unmaintained packages for security-critical functions +- Detection approach: check dependency manifests for unpinned versions and known deprecated packages diff --git a/dev-skills/skills/documentation-criteria/SKILL.md b/dev-skills/skills/documentation-criteria/SKILL.md new file mode 100644 index 0000000..9404392 --- /dev/null +++ b/dev-skills/skills/documentation-criteria/SKILL.md @@ -0,0 +1,236 @@ +--- +name: documentation-criteria +description: Documentation creation criteria including PRD, ADR, Design Doc, and Work Plan requirements with templates. Use when creating or reviewing technical documents, or determining which documents are required. +--- + +# Documentation Creation Criteria + +## Templates + +- **[prd-template.md](references/prd-template.md)** - Product Requirements Document template +- **[adr-template.md](references/adr-template.md)** - Architecture Decision Record template +- **[ui-spec-template.md](references/ui-spec-template.md)** - UI Specification template (frontend/fullstack features) +- **[design-template.md](references/design-template.md)** - Technical Design Document template +- **[plan-template.md](references/plan-template.md)** - Work Plan template +- **[task-template.md](references/task-template.md)** - Task file template for implementation tasks + +## Creation Decision Matrix + +| Condition | Required Documents | Creation Order | +|-----------|-------------------|----------------| +| New Feature Addition (backend) | PRD → [ADR] → Design Doc → Work Plan | After PRD approval | +| New Feature Addition (frontend/fullstack) | PRD → **UI Spec** → [ADR] → Design Doc → Work Plan | UI Spec before Design Doc | +| ADR Conditions Met (see below) | ADR → Design Doc → Work Plan | Start immediately | +| 6+ Files | ADR → Design Doc → Work Plan (Required) | 
Start immediately | +| 3-5 Files | Design Doc → Work Plan (Recommended) | Start immediately | +| 1-2 Files | None | Direct implementation | + +## ADR Creation Conditions (Required if Any Apply) + +### 1. Contract System Changes +- **Adding nested contracts with 3+ levels**: `Contract A { Contract B { Contract C { field: T } } }` + - Rationale: Deep nesting has high complexity and wide impact scope +- **Changing/deleting contracts used in 3+ locations** + - Rationale: Multiple location impacts require careful consideration +- **Contract responsibility changes** (e.g., DTO→Entity, Request→Domain) + - Rationale: Conceptual model changes affect design philosophy + +### 2. Data Flow Changes +- **Storage location changes** (DB→File, Memory→Cache) +- **Processing order changes with 3+ steps** + - Example: "Input→Validation→Save" to "Input→Save→Async Validation" +- **Data passing method changes** (parameter passing→shared state, direct reference→event-based communication) + +### 3. Architecture Changes +- Layer addition, responsibility changes, component relocation + +### 4. External Dependency Changes +- Library/framework/external API introduction or replacement + +### 5. Complex Implementation Logic (Regardless of Scale) +- Managing 3+ states +- Coordinating 5+ asynchronous processes + +## Detailed Document Definitions + +### PRD (Product Requirements Document) + +**Purpose**: Define business requirements and user value + +**Includes**: +- Business requirements and user value +- Success metrics and KPIs (each metric specifies a numeric target and measurement method) +- User stories and use cases +- MoSCoW prioritization (Must/Should/Could/Won't) +- Acceptance criteria with sequential IDs (AC-001, AC-002, ...) for downstream traceability +- MVP and Future phase separation +- User journey diagram (required) +- Scope boundary diagram (required) + +**Scope**: Business requirements, user value, success metrics, user stories, and prioritization only. 
Implementation details belong in Design Doc, technical selection rationale in ADR, phases and task breakdown in Work Plan. + +### ADR (Architecture Decision Record) + +**Purpose**: Record technical decision rationale and background + +**Includes**: +- Decision (what was selected) +- Rationale (why that selection was made) +- Option comparison (minimum 3 options) and trade-offs +- Architecture impact +- Principled implementation guidelines (e.g., "Use dependency injection") + +**Scope**: Decision, rationale, option comparison, architecture impact, and principled guidelines only. Implementation procedures and code examples belong in Design Doc, schedule and resource assignments in Work Plan. + +### UI Specification + +**Purpose**: Define UI structure, screen transitions, component decomposition, and interaction design for frontend features + +**Includes**: +- Screen list and transition conditions +- Component decomposition with state x display matrix (default/loading/empty/error/partial) +- Interaction definitions linked to PRD acceptance criteria (EARS format) +- Prototype management (code-based prototypes as attachments, not source of truth) +- AC traceability from PRD to screens/components +- Existing component reuse map and design tokens +- Visual acceptance criteria (golden states, layout constraints) +- Accessibility requirements (keyboard, screen reader, contrast) + +**Scope**: Screen structure, transitions, component decomposition, interaction design, and visual acceptance criteria only. Technical implementation and API contracts belong in Design Doc, test implementation in acceptance-test-generator skeletons, schedule in Work Plan. 
+ +**Required Structural Elements**: +- At least one component with state x display matrix and interaction table +- AC traceability table mapping PRD ACs to screens/states +- Screen list with transition conditions +- Existing component reuse map (reuse/extend/new decisions) + +**Prototype Code Handling**: +- Prototype code provided by user is placed in `docs/ui-spec/assets/{feature-name}/` +- Prototype is an attachment to UI Spec, never the source of truth +- UI Spec + Design Doc are the canonical specifications + +### Design Document + +**Purpose**: Define technical implementation methods in detail + +**Includes**: +- **Existing codebase analysis** (required) + - Implementation path mapping (both existing and new) + - Integration point clarification (connection points with existing code even for new implementations) +- Technical implementation approach (vertical/horizontal/hybrid) +- **Technical dependencies and implementation constraints** (required implementation order) +- Interface and contract definitions +- Data flow and component design +- **Acceptance criteria (each criterion specifies a verifiable condition with pass/fail threshold)** +- Change impact map (clearly specify direct impact/indirect impact/no ripple effect) +- Complete enumeration of integration points +- Data contract clarification +- **Agreement checklist** (agreements with stakeholders) +- **Code inspection evidence** (inspected files/functions during investigation) +- **Field propagation map** (when fields cross component boundaries) +- **Data representation decision** (when introducing new structures) +- **Applicable standards** (explicit/implicit classification) +- **Prerequisite ADRs** (including common ADRs) +- **Verification Strategy** (required) + - Correctness proof method (what "correct" means for this change, how it's verified, when) + - Early verification point (first target to prove the approach works, success criteria, failure response) + +**Required Structural Elements**: 
+```yaml +Change Impact Map: + Change Target: [Component/Feature] + Direct Impact: [Files/Functions] + Indirect Impact: [Data format/Processing time] + No Ripple Effect: [Unaffected features] + +Interface Change Matrix: + Existing: [Function/method/operation name] + New: [Function/method/operation name] + Conversion Required: [Yes/No] + Compatibility Method: [Approach] +``` + +**Scope**: Technical implementation methods, interfaces, data flow, acceptance criteria, and verification strategy only. Technology selection rationale belongs in ADR, schedule and assignments in Work Plan. + +### Work Plan + +**Purpose**: Implementation task management and progress tracking + +**Includes**: +- Task breakdown and dependencies (maximum 2 levels) +- Schedule and duration estimates +- **Include test skeleton file paths from acceptance-test-generator** (integration and E2E) +- **Verification Strategy summary** (extracted from Design Doc) +- **Final Quality Assurance Phase (required)** +- Progress records (checkbox format) + +**Scope**: Task breakdown, dependencies, schedule, verification strategy summary, and progress tracking only. Technical rationale belongs in ADR, design details in Design Doc. + +**Phase Division Criteria** (adapt to implementation approach from Design Doc): + +**When Vertical Slice selected**: +- Each phase = one value unit (feature, component, or migration target) +- Each phase includes its own implementation + verification per Verification Strategy + +**When Horizontal Slice selected**: +1. **Phase 1: Foundation Implementation** - Contract definitions, interfaces/signatures, test preparation +2. **Phase 2: Core Feature Implementation** - Business logic, unit tests +3. 
**Phase 3: Integration Implementation** - External connections, presentation layer + +**When Hybrid selected**: +- Combine vertical and horizontal as defined in Design Doc implementation approach + +**All approaches**: Final phase is always Quality Assurance (acceptance criteria achievement, all tests passing, quality checks). Each phase's verification method follows Verification Strategy from Design Doc. + +**Three Elements of Task Completion Definition**: +1. **Implementation Complete**: Code is functional +2. **Quality Complete**: Tests, static checks, linting pass +3. **Integration Complete**: Verified connection with other components + +## Creation Process + +1. **Problem Analysis**: Change scale assessment, ADR condition check + - Identify explicit and implicit project standards before investigation +2. **ADR Option Consideration** (ADR only): Compare 3+ options, specify trade-offs +3. **Creation**: Use templates, include measurable conditions +4. **Approval**: "Accepted" after review enables implementation + +## Storage Locations + +| Document | Path | Naming Convention | Template | +|----------|------|------------------|----------| +| PRD | `docs/prd/` | `[feature-name]-prd.md` | [prd-template.md](references/prd-template.md) | +| ADR | `docs/adr/` | `ADR-[4-digits]-[title].md` | [adr-template.md](references/adr-template.md) | +| UI Spec | `docs/ui-spec/` | `[feature-name]-ui-spec.md` | [ui-spec-template.md](references/ui-spec-template.md) | +| UI Spec Assets | `docs/ui-spec/assets/{feature-name}/` | Prototype code files | - | +| Design Doc | `docs/design/` | `[feature-name]-design.md` | [design-template.md](references/design-template.md) | +| Work Plan | `docs/plans/` | `YYYYMMDD-{type}-{description}.md` | [plan-template.md](references/plan-template.md) | +| Task File | `docs/plans/tasks/` | `{plan-name}-task-{number}.md` | [task-template.md](references/task-template.md) | + +*Note: Work plans are excluded by `.gitignore` + +## ADR Status +`Proposed` → 
`Accepted` → `Deprecated`/`Superseded`/`Rejected` + +## AI Automation Rules +- 5+ files: Suggest ADR creation +- Contract/data flow change detected: ADR mandatory +- Check existing ADRs before implementation + +## Diagram Requirements + +Required diagrams for each document (using mermaid notation): + +| Document | Required Diagrams | Purpose | +|----------|------------------|---------| +| PRD | User journey diagram, Scope boundary diagram | Clarify user experience and scope | +| ADR | Option comparison diagram (when needed) | Visualize trade-offs | +| UI Spec | Screen transition diagram, Component tree diagram | Clarify screen flow and component structure | +| Design Doc | Architecture diagram, Data flow diagram | Understand technical structure | +| Work Plan | Phase structure diagram, Task dependency diagram | Clarify implementation order | + +## Common ADR Relationships +1. **At creation**: Identify common technical areas (logging, error handling, async processing, etc.), reference existing common ADRs +2. **When missing**: Consider creating necessary common ADRs +3. **Design Doc**: Specify common ADRs in "Prerequisite ADRs" section +4. **Compliance check**: Verify design aligns with common ADR decisions \ No newline at end of file diff --git a/dev-skills/skills/documentation-criteria/references/adr-template.md b/dev-skills/skills/documentation-criteria/references/adr-template.md new file mode 100644 index 0000000..2f7f490 --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/adr-template.md @@ -0,0 +1,68 @@ +# [ADR Number] [Title] + +## Status + +[Proposed | Accepted | Deprecated | Superseded | Rejected] + +## Context + +[Describe the background and reasons why this decision is needed. Include the essence of the problem, current challenges, and constraints] + +## Decision + +[Describe the actual decision made. 
Aim for specific and clear descriptions] + +### Decision Details + +| Item | Content | +|------|---------| +| **Decision** | [The decision in one sentence] | +| **Why now** | [Why this needs to happen now (timing rationale)] | +| **Why this** | [Why this option over alternatives (1-3 lines)] | +| **Known unknowns** | [At least one uncertainty at this point] | +| **Kill criteria** | [One signal that should trigger reversal of this decision] | + +## Rationale + +[Explain why this decision was made and why it is the best option compared to alternatives] + +### Options Considered + +1. **Option 1**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +2. **Option 2**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +3. **Option 3 (Selected)**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +## Consequences + +### Positive Consequences + +- [List positive impacts on the project or system] + +### Negative Consequences + +- [List negative impacts or trade-offs that need to be accepted] + +### Neutral Consequences + +- [List changes that are neither good nor bad] + +## Architecture Impact + +[Describe how this decision affects existing architecture: (1) components that change, (2) new dependencies introduced, (3) architectural constraints added or removed] + +## Implementation Guidance + +[Principled direction only. Implementation procedures go to Design Doc] +Example: "Use dependency injection" ✓, "Implement in Phase 1" ✗ + +## Related Information + +- [Links to related ADRs, documents, issues, PRs, etc.] 
diff --git a/dev-skills/skills/documentation-criteria/references/design-template.md b/dev-skills/skills/documentation-criteria/references/design-template.md new file mode 100644 index 0000000..28e9c5b --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/design-template.md @@ -0,0 +1,388 @@ +# [Feature Name] Design Document + +## Overview + +[Explain the purpose and overview of this feature in 2-3 sentences] + +### Referenced UI Spec (when feature includes frontend) +- UI Spec path: [docs/ui-spec/xxx-ui-spec.md] +- Component structure and state design are inherited from UI Spec + +## Design Summary (Meta) + +```yaml +design_type: "new_feature|extension|refactoring" +risk_level: "low|medium|high" +complexity_level: "low|medium|high" +complexity_rationale: "[Required if medium/high: (1) which requirements/ACs necessitate this complexity, (2) which constraints/risks it addresses]" +main_constraints: + - "[constraint 1]" + - "[constraint 2]" +biggest_risks: + - "[risk 1]" + - "[risk 2]" +unknowns: + - "[uncertainty 1]" + - "[uncertainty 2]" +``` + +## Background and Context + +### Prerequisite ADRs + +- [ADR File Name]: [Related decision items] +- Reference common technical ADRs when applicable + +### Agreement Checklist + +#### Scope +- [ ] [Features/components to change] +- [ ] [Features to add] + +#### Non-Scope (Explicitly not changing) +- [ ] [Features/components not to change] +- [ ] [Existing logic to preserve] + +#### Constraints +- [ ] Parallel operation: [Yes/No] +- [ ] Backward compatibility: [Required/Not required] +- [ ] Performance measurement: [Required/Not required] + +#### Applicable Standards +- [ ] [Standard/convention] `[explicit]` - Source: [config / rule file / documentation path] +- [ ] [Observed pattern] `[implicit]` - Evidence: [file paths] - Confirmed: [Yes/No] + +#### Quality Assurance Mechanisms +How quality is enforced in the change area. 
Each item is either adopted (will be enforced during implementation) or noted (observed but not adopted, with reason). + +- [ ] [Tool/check name] — Enforces: [what] — Config: [path] — Covers: [file paths/patterns, or "project-wide"] — Status: `adopted` / `noted (reason)` +- [ ] [Domain-specific constraint] — Enforces: [what] — Source: [path] — Covers: [file paths/patterns, or "project-wide"] — Status: `adopted` / `noted (reason)` + +### Problem to Solve + +[Specific problems or challenges this feature aims to address] + +### Current Challenges + +[Current system issues or limitations] + +### Requirements + +#### Functional Requirements + +- [List mandatory functional requirements] + +#### Non-Functional Requirements + +- **Performance**: [Response time, throughput requirements] +- **Scalability**: [Requirements for handling increased load] +- **Reliability**: [Error rate, availability requirements] +- **Maintainability**: [Code readability and changeability] + +## Acceptance Criteria (AC) - EARS Format + +Each AC is written in EARS (Easy Approach to Requirements Syntax) format. +Keywords determine test type and reduce ambiguity. 
+
+**EARS Keywords**:
+| Keyword | Usage | Test Type |
+|---------|-------|-----------|
+| **When** | Event-triggered behavior | Event-driven test |
+| **While** | State-dependent behavior | State condition test |
+| **If-then** | Conditional behavior | Branch coverage test |
+| (none) | Ubiquitous behavior | Basic functionality test |
+
+**Format**: `[Keyword] <trigger/condition>, the system shall <system response>`
+
+### [Functional Requirement 1]
+
+- [ ] **When** user clicks login button with valid credentials, the system shall authenticate and redirect to dashboard
+- [ ] **If** credentials are invalid, **then** the system shall display error message "Invalid credentials"
+- [ ] **While** user is logged in, the system shall maintain the session for configured timeout period
+
+### [Functional Requirement 2]
+
+- [ ] The system shall display data list with pagination of 10 items per page
+- [ ] **When** input is entered in search field, the system shall apply real-time filtering
+
+## Existing Codebase Analysis
+
+### Implementation Path Mapping
+| Type | Path | Description |
+|------|------|-------------|
+| Existing | src/[actual-path] | [Current implementation] |
+| New | src/[planned-path] | [Planned new creation] |
+
+### Integration Points (Include even for new implementations)
+- **Integration Target**: [What to connect with]
+- **Invocation Method**: [How it will be invoked]
+
+### Code Inspection Evidence
+
+| File/Function | Relevance |
+|---------------|-----------|
+| [path:function] | [similar functionality / integration point / pattern reference] |
+
+### Fact Disposition Table
+
+One row per codebase analysis `focusAreas` entry. This table is the single binding between existing-behavior facts and the design — other sections that describe existing behavior reference the row by Focus Area name. 
+ +| Fact ID | Focus Area | Disposition | Rationale | Evidence | +|---------|------------|-------------|-----------|----------| +| [fact_id from focusAreas] | [area name from focusAreas] | preserve / transform / remove / out-of-scope | [for transform: state new outcome; for remove: state reason; for out-of-scope: state which scope boundary excludes it; for preserve: brief confirmation] | [evidence value carried verbatim from focusAreas] | + +## Design + +### Change Impact Map + +```yaml +Change Target: [Component/feature to change] +Direct Impact: + - [Files/functions requiring direct changes] + - [Interface change points] +Indirect Impact: + - [Data format changes] + - [Processing time changes] +No Ripple Effect: + - [Explicitly specify unaffected features] +``` + +### Interface Change Matrix + +| Existing | New | Conversion Required | Compatibility Method | +|----------|-----|--------------------|--------------------| +| [Function/method/operation name] | [Function/method/operation name] | [Yes/No] | [Approach: adapter, wrapper, deprecation, etc.] | + +### Architecture Overview + +[How this feature is positioned within the overall system] + +### Data Flow + +``` +[Express data flow using diagrams or pseudo-code] +``` + +### Integration Points List + +| Integration Point | Location | Old Implementation | New Implementation | Switching Method | Verification Method | +|-------------------|----------|-------------------|-------------------|------------------|-------------------| +| Integration Point 1 | [Class/Function] | [Existing Process] | [New Process] | [DI/Factory etc.] 
| [How to verify this switching works] | +| Integration Point 2 | [Another Location] | [Existing] | [New] | [Method] | [Verification approach] | + +### Main Components + +#### Component 1 + +- **Responsibility**: [Scope of responsibility for this component] +- **Interface**: [APIs and contract definitions provided] +- **Dependencies**: [Relationships with other components] + +#### Component 2 + +- **Responsibility**: [Scope of responsibility for this component] +- **Interface**: [APIs and contract definitions provided] +- **Dependencies**: [Relationships with other components] + +### Data Representation Decision (When Introducing New Structures) + +| Criterion | Assessment | Reason | +|-----------|-----------|--------| +| Semantic Fit | [Yes/No] | [Does existing structure's meaning align?] | +| Responsibility Fit | [Yes/No] | [Same bounded context?] | +| Lifecycle Fit | [Yes/No] | [Same creation/mutation/deletion timing?] | +| Boundary/Interop Cost | [Low/Medium/High] | [Cost of sharing across boundaries?] 
| + +**Decision**: [reuse / extend / new] — [rationale in 1-2 sentences] + +### Contract Definitions + +``` +// Record major contract/interface definitions here +``` + +### Data Contract + +#### Component 1 + +```yaml +Input: + Type: [Data shape, contract, or schema] + Preconditions: [Required items, format constraints] + Validation: [Validation method] + +Output: + Type: [Data shape, contract, or schema] + Guarantees: [Conditions that must always be met] + On Error: [Exception/null/default value] + +Invariants: + - [Conditions that remain unchanged before and after processing] +``` + +### Field Propagation Map (When Fields Cross Boundaries) + +| Field | Boundary | Status | Detail | +|-------|----------|--------|--------| +| [field name] | [Component A → B] | preserved / transformed / dropped | [logic or reason] | + +### State Transitions and Invariants (When Applicable) + +```yaml +State Definition: + - Initial State: [Initial values and conditions] + - Possible States: [List of states] + +State Transitions: + Current State → Event → Next State + +System Invariants: + - [Conditions that hold in any state] +``` + +### UI Error State Design (when feature includes frontend) + +| Component / Screen | Loading | Empty | Error | Partial | +|-------------------|---------|-------|-------|---------| +| [Component name] | [Skeleton / spinner] | [Empty state + CTA] | [Error message + Retry] | [Cached display + Banner] | + +### Client State Design (when feature includes frontend) + +| State Category | State | Management Method | Sync Strategy | +|---------------|-------|-------------------|---------------| +| Server state | [Fetched data] | [Cache library / custom hook] | [Polling / WebSocket / manual refresh] | +| Local UI state | [Modal open, tab selection] | [useState / useReducer] | - | +| Temporary state | [Form input, draft] | [useState / form library] | [Auto-save / manual save] | + +### UI Action - API Contract Mapping (when feature includes frontend) + +| UI Action | 
API Endpoint | Request | Response | Error Contract | +|-----------|-------------|---------|----------|----------------| +| [Button click / form submit] | [POST /api/xxx] | [Request body fields] | [Response fields] | [Error codes and UI handling] | + +### Error Handling + +| Error Category | Example | Detection | Recovery Strategy | User Impact | +|---------------|---------|-----------|-------------------|-------------| +| [Validation / External / Infrastructure / Business logic] | [Specific error] | [How detected] | [Retry / Fallback / Propagate / Log-and-continue] | [User-facing message or silent handling] | + +### Logging and Monitoring + +- **Log events**: [Key events to log: state transitions, external calls, error occurrences, performance thresholds] +- **Log levels**: [Which events at DEBUG/INFO/WARN/ERROR] +- **Sensitive data**: [Fields to mask or exclude — coordinate with Security Considerations] +- **Monitoring**: [Metrics to track, alert thresholds, dashboard requirements] + +## Implementation Plan + +### Implementation Approach + +**Selected Approach**: [Approach name or combination] +**Selection Reason**: [Reason considering project constraints and technical dependencies] + +### Technical Dependencies and Implementation Order + +#### Required Implementation Order +1. **[Component/Feature A]** + - Technical Reason: [Why this needs to be implemented first] + - Dependent Elements: [Other components that depend on this] + +2. **[Component/Feature B]** + - Technical Reason: [Technical necessity to implement after A] + - Prerequisites: [Required pre-implementations] + +### Migration Strategy + +[Technical migration approach, ensuring backward compatibility] + +## Security Considerations + +Evaluate the following for this feature's trust boundaries and data flow: + +- **Authentication & Authorization**: What authentication is required for new entry points? What authorization checks protect resource access? 
+- **Input Validation**: Where does external input enter the system? How is it validated before processing? +- **Sensitive Data Handling**: What data requires protection (encryption, masking, access control)? What data is safe to include in logs and error responses? + +Mark items as N/A with brief rationale when the feature has no relevant trust boundary. + +## Test Boundaries + +### Mock Boundary Decisions + +| Component/Dependency | Mock? | Rationale | +|---------------------|-------|-----------| +| [External API / DB / File system / etc.] | [Yes/No] | [Why this boundary was chosen] | + +### Data Layer Testing Strategy + +- **Schema dependencies**: [List tables/models this feature reads from or writes to, with paths to their definitions] +- **Test data approach**: [How test data is provided — fixtures, factories, seed scripts, or real database] +- **Mock limitations acknowledged**: [What cannot be reliably tested with mocks alone for this feature] + +Mark as N/A with brief rationale when the feature has no data layer dependencies. + +### Integration Verification Points + +- [List critical integration points that require testing beyond unit-level mocks] + +## Verification Strategy + +Verification Strategy defines what correctness means and how to prove it at design time. L1/L2/L3 (from implementation-approach skill) define completion verification granularity at task execution time. + +### Correctness Proof Method + +How will this change's correctness be demonstrated? 
+ +- **Correctness definition**: [What "correct" means for this change — e.g., "output matches existing behavior", "all ACs pass in production-equivalent environment", "generated queries execute without error on target DB"] +- **Verification method**: [Specific technique — e.g., "compare new implementation output against existing implementation", "run against staging DB", "contract test with real API"] +- **Verification timing**: [When verification occurs — e.g., "after first vertical slice", "per repository", "at integration phase"] + +### Early Verification Point + +What is verified first, and how, to confirm the approach is correct before scaling? + +- **First verification target**: [The smallest unit that proves the approach works — e.g., "first repository migration", "single API endpoint", "one screen flow"] +- **Success criteria**: [Observable outcome — e.g., "CSV download produces identical output to legacy", "API returns 200 with expected schema"] +- **Failure response**: [What to do if early verification fails — e.g., "reassess approach before proceeding", "escalate to user"] + +### Output Comparison (When Replacing or Modifying Existing Behavior) + +How will behavioral equivalence be verified between existing and new implementation? + +- **Comparison input**: [Identical input used for both implementations — e.g., "same DB snapshot", "same API request payload"] +- **Expected output fields**: [Specific fields/columns to compare — e.g., "all output columns", "response body fields: id, status, amount"] +- **Diff method**: [How to compare — e.g., "file-level diff", "JSON field-by-field comparison", "row count + spot check"] +- **Transformation pipeline coverage**: [Each step from codebase analysis `dataTransformationPipelines` and what the comparison covers] + +Mark as N/A with brief rationale when the design introduces entirely new behavior with no existing equivalent. 
+ +## Future Extensibility + +- **Extension points**: [Interfaces, hooks, or plugin mechanisms designed for future use] +- **Known future requirements**: [Planned features that influenced current design decisions] +- **Intentional limitations**: [What was deliberately kept simple and why] + +## Alternative Solutions + +### Alternative 1 + +- **Overview**: [Description of alternative solution] +- **Advantages**: [Advantages] +- **Disadvantages**: [Disadvantages] +- **Reason for Rejection**: [Why it wasn't adopted] + +## Risks and Mitigation + +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| [Risk 1] | High/Medium/Low | High/Medium/Low | [Countermeasure] | + +## References + +- [Related documentation and links] + +## Update History + +| Date | Version | Changes | Author | +|------|---------|---------|--------| +| YYYY-MM-DD | 1.0 | Initial version | [Name] | diff --git a/dev-skills/skills/documentation-criteria/references/plan-template.md b/dev-skills/skills/documentation-criteria/references/plan-template.md new file mode 100644 index 0000000..1120318 --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/plan-template.md @@ -0,0 +1,192 @@ +# Work Plan: [Feature Name] Implementation + +Created Date: YYYY-MM-DD +Type: feature|fix|refactor +Estimated Duration: X days +Estimated Impact: X files +Related Issue/PR: #XXX (if any) + +## Related Documents +- Design Doc(s): + - [docs/design/XXX.md] + - [docs/design/YYY.md] (if multiple, e.g. 
backend + frontend) +- ADR: [docs/adr/ADR-XXXX.md] (if any) +- PRD: [docs/prd/XXX.md] (if any) + +## Verification Strategy (from Design Doc) + +### Correctness Proof Method +- **Correctness definition**: [extracted from Design Doc] +- **Verification method**: [extracted from Design Doc] +- **Verification timing**: [extracted from Design Doc] + +### Early Verification Point +- **First verification target**: [extracted from Design Doc] +- **Success criteria**: [extracted from Design Doc] +- **Failure response**: [extracted from Design Doc] + +## Quality Assurance Mechanisms (from Design Doc) + +Adopted quality gates for the change area. Each task in this plan must satisfy these mechanisms. + +| Mechanism | Enforces | Config Location | Covered Files | +|-----------|----------|-----------------|---------------| +| [Tool/check name] | [What quality aspect it enforces] | [path/to/config] | [file paths or patterns covered, or "project-wide"] | +| [Domain constraint] | [What it enforces] | [path/to/source] | [file paths or patterns covered, or "project-wide"] | + +## Design-to-Plan Traceability + +Maps each Design Doc technical requirement to the covering task(s). One row per extracted item. Every row must have at least one covering task, or an explicit gap justification. 
+ +| DD Section | DD Item | Category | Covered By Task(s) | Gap Status | Notes | +|---|---|---|---|---|---| +| [Section name from DD] | [Specific item] | impl-target / connection-switching / contract-change / verification / prerequisite | [Phase X Task Y] | covered | | + +**Category values**: `impl-target` (implementation target), `connection-switching` (connection/switching/registration), `contract-change` (contract change and propagation), `verification` (verification requirement), `prerequisite` (prerequisite work) + +**Gap Status values**: `covered` (task exists), `gap` (no task — requires justification in Notes, user confirmation required before plan approval) + +## Objective +[Why this change is necessary, what problem it solves] + +## Background +[Current state and why changes are needed] + +## Risks and Countermeasures + +### Technical Risks +- **Risk**: [Risk description] + - **Impact**: [Impact assessment] + - **Countermeasure**: [How to address it] + +### Schedule Risks +- **Risk**: [Risk description] + - **Impact**: [Impact assessment] + - **Countermeasure**: [How to address it] + +## Implementation Phases + +Select ONE phase structure based on implementation approach from Design Doc. +See documentation-criteria skill for detailed Phase Division Criteria. +All quality checks follow Quality Check Workflow from ai-development-guide skill. + +### Option A: Vertical Slice Phase Structure + +Use when implementation approach is Vertical Slice. Each phase = one value unit with verification. 
+ +### Phase 1: [Value Unit 1 Name] (Estimated commits: X) +**Purpose**: [First vertical slice — proves approach works] +**Verification**: [From Verification Strategy: early verification point] + +#### Tasks +- [ ] Task 1: Implementation +- [ ] Task 2: Verification per Verification Strategy +- [ ] Quality check (staged) + +#### Phase Completion Criteria +- [ ] Early verification point passed +- [ ] [Functional criteria] + +### Phase 2: [Value Unit 2 Name] (Estimated commits: X) +**Purpose**: [Subsequent value unit] +**Verification**: [From Verification Strategy] + +#### Tasks +- [ ] Task 1: Implementation +- [ ] Task 2: Verification per Verification Strategy +- [ ] Quality check + +#### Phase Completion Criteria +- [ ] [Functional criteria] +- [ ] [Quality criteria] + +### Option B: Horizontal Slice Phase Structure + +Use when implementation approach is Horizontal Slice. Phases follow Foundation → Core → Integration → QA. + +### Phase 1: [Foundation] (Estimated commits: X) +**Purpose**: Contract definitions, interfaces, test preparation + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check (staged) +- [ ] Unit tests: All related tests pass + +#### Phase Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Phase 2: [Core Feature] (Estimated commits: X) +**Purpose**: Business logic, unit tests + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check (staged) +- [ ] Integration tests: Verify overall feature functionality + +#### Phase Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Phase 3: [Integration] (Estimated commits: X) +**Purpose**: External connections, presentation layer + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check +- [ ] Integration tests: Verify component coordination + +#### Phase 
Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Option C: Hybrid Phase Structure + +Use when implementation approach is Hybrid. Combine vertical and horizontal phases as defined in Design Doc implementation approach. Structure phases per Design Doc specification, ensuring each phase has Tasks, Verification, and Phase Completion Criteria sections matching the format above. + +### Final Phase: Quality Assurance (Required) (Estimated commits: 1) + +This phase is required for ALL implementation approaches. + +**Purpose**: Cross-cutting quality assurance and Design Doc consistency verification + +#### Tasks +- [ ] Verify all Design Doc acceptance criteria achieved +- [ ] Security review: Verify security considerations from Design Doc are implemented +- [ ] Quality checks (types, lint, format) +- [ ] Execute all tests (including integration/E2E from test skeletons, when provided) +- [ ] Coverage 70%+ +- [ ] Document updates + +### Quality Assurance +- [ ] Quality check (staged) +- [ ] All tests pass +- [ ] Static check pass +- [ ] Lint check pass +- [ ] Build success + +## Completion Criteria +- [ ] All phases completed +- [ ] All integration/E2E tests passing (when test skeletons provided) +- [ ] Design Doc acceptance criteria satisfied +- [ ] Staged quality checks completed (zero errors) +- [ ] All tests pass +- [ ] Necessary documentation updated +- [ ] User review approval obtained + +## Progress Tracking +### Phase 1 +- Start: YYYY-MM-DD HH:MM +- Complete: YYYY-MM-DD HH:MM +- Notes: [Any special remarks] + +### Phase 2 +- Start: YYYY-MM-DD HH:MM +- Complete: YYYY-MM-DD HH:MM +- Notes: [Any special remarks] + +## Notes +[Special notes, reference information, important points, etc.] 
diff --git a/dev-skills/skills/documentation-criteria/references/prd-template.md b/dev-skills/skills/documentation-criteria/references/prd-template.md new file mode 100644 index 0000000..8a670c3 --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/prd-template.md @@ -0,0 +1,142 @@ +# PRD: [Feature Name] + +## Overview + +### One-line Summary +[Describe this feature in one line] + +### Background +[Why is this feature needed? What problem does it solve?] + +## User Stories + +### Primary Users +[Define the main target users] + +### User Stories +``` +As a [user type] +I want to [goal/desire] +So that [expected value/benefit] +``` + +### Use Cases +1. [Specific usage scenario 1] +2. [Specific usage scenario 2] +3. [Specific usage scenario 3] + +### User Journey Diagram +```mermaid +journey + title [Feature Name] User Journey + section [Phase 1] + [Step]: [satisfaction score]: [actor] +``` +[Map the end-to-end user experience from trigger event to goal completion] + +### Scope Boundary Diagram +```mermaid +C4Context + Boundary(scope, "In Scope") { + [Components in scope] + } + Boundary(out, "Out of Scope") { + [Components out of scope] + } +``` +[Clarify what is and is not included in this feature] + +## Functional Requirements + +### Must Have (P1 - MVP) +- [ ] Requirement 1: [Detailed description] + - AC-001: [Acceptance criteria - Given/When/Then format or measurable standard] + - AC-002: [Acceptance criteria] +- [ ] Requirement 2: [Detailed description] + - AC-003: [Acceptance criteria] + +### Should Have (P2) +- [ ] Requirement 1: [Detailed description] + - AC-004: [Acceptance criteria] + +### Could Have (P3) +- [ ] Requirement 1: [Detailed description] + +### Won't Have (this release) +- Item 1: [Description and reason for exclusion] +- Item 2: [Description and reason for exclusion] + +## Non-Functional Requirements + +### Performance +- Response Time: [Target value] +- Throughput: [Target value] +- Concurrency: [Target value] + +### 
Reliability +- Availability: [Target value] +- Error Rate: [Target value] + +### Security +- [Security requirements details] + +### Scalability +- [Considerations for future scaling] + +### Accessibility (when feature includes UI) +- Compliance standard: [Default: WCAG 2.1 AA (use organization standard if available)] +- Target assistive technologies: [Screen reader, keyboard operation, voice control, etc.] +- Platform requirements: [e.g., app store review requirements] +- Known constraints: [e.g., external library limitations] + +## Success Criteria + +### Quantitative Metrics +1. [Metric name]: [numeric target] measured by [method] within [timeframe] +2. [Metric name]: [numeric target] measured by [method] within [timeframe] +3. [Metric name]: [numeric target] measured by [method] within [timeframe] + +### Qualitative Metrics +1. [User experience metric 1] +2. [User experience metric 2] + +### UI Quality Metrics (when feature includes UI) +1. [Key operation completion rate / error recovery rate / retry success rate] +2. 
[Accessibility audit target score] + +## Technical Considerations + +### Dependencies +- [Dependencies on existing systems] +- [Dependencies on external services] + +### Constraints +- [Technical constraints] +- [Resource constraints] + +### Assumptions +- [Prerequisite requiring validation 1] +- [Prerequisite requiring validation 2] + +### Risks and Mitigation +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| [Risk 1] | High/Medium/Low | High/Medium/Low | [Countermeasure] | +| [Risk 2] | High/Medium/Low | High/Medium/Low | [Countermeasure] | + +## Undetermined Items + +- [ ] [Question 1]: [Description of options or impacts] +- [ ] [Question 2]: [Description of options or impacts] + +*Discuss with user until this section is empty, then delete after confirmation* + +## Appendix + +### References +- [Related document 1] +- [Related document 2] + +### Glossary +- **Term 1**: [Definition] +- **Term 2**: [Definition] diff --git a/dev-skills/skills/documentation-criteria/references/task-template.md b/dev-skills/skills/documentation-criteria/references/task-template.md new file mode 100644 index 0000000..6207b79 --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/task-template.md @@ -0,0 +1,54 @@ +# Task: [Task Name] + +Metadata: +- Dependencies: task-01 → Deliverable: docs/plans/analysis/research-results.md +- Provides: docs/plans/analysis/api-spec.md (for research/design tasks) +- Size: Small (1-2 files) + +## Implementation Content +[What this task will achieve] +*Reference dependency deliverables if applicable + +## Target Files +- [ ] [Implementation file path] +- [ ] [Test file path] + +## Investigation Targets +Files to read before starting implementation (file path, with optional search hint): +- [e.g., src/orders/checkout (processOrder function) — determined by task-decomposer based on task nature] + +## Implementation Steps (TDD: Red-Green-Refactor) +### 1. 
Red Phase +- [ ] Read all Investigation Targets and record key observations +- [ ] Review dependency deliverables (if any) +- [ ] Verify/create contract definitions +- [ ] Write failing tests +- [ ] Run tests and confirm failure + +### 2. Green Phase +- [ ] Add minimal implementation to pass tests +- [ ] Run only added tests and confirm they pass + +### 3. Refactor Phase +- [ ] Improve code (maintain passing tests) +- [ ] Confirm added tests still pass + +## Quality Assurance Mechanisms +(From work plan header — mechanisms relevant to this task's target files) +- [Tool/check name] — Enforces: [what] — Config: [path] + +## Operation Verification Methods +(Derived from Verification Strategy in work plan) +- **Verification method**: [What to verify and how — e.g., "compare new implementation output against existing implementation at src/legacy/order_calc", "run endpoint against test database and verify response matches contract"] +- **Success criteria**: [Observable outcome that proves correctness — e.g., "output matches existing implementation for all input combinations", "API returns 200 with expected schema"] +- **Failure response**: [What to do if verification fails — e.g., "reassess approach before proceeding", "escalate to user"] +- **Verification level**: [L1: Functional operation as end-user feature / L2: New tests added and passing / L3: Code builds without errors] + +## Completion Criteria +- [ ] All added tests pass +- [ ] Operation verified per Operation Verification Methods above +- [ ] Deliverables created (for research/design tasks) + +## Notes +- Impact scope: [Areas where changes may propagate] +- Scope boundary: [Files to preserve unchanged — path and reason] diff --git a/dev-skills/skills/documentation-criteria/references/ui-spec-template.md b/dev-skills/skills/documentation-criteria/references/ui-spec-template.md new file mode 100644 index 0000000..134fafc --- /dev/null +++ b/dev-skills/skills/documentation-criteria/references/ui-spec-template.md 
@@ -0,0 +1,199 @@ +# [Feature Name] UI Specification + +## Overview + +[Purpose and scope of this UI Specification in 2-3 sentences] + +### Target PRD +- PRD path: [docs/prd/xxx-prd.md | "N/A — based on requirement-analyzer output"] +- Feature scope: [Which PRD requirements this UI Spec covers | Summary of analyzed requirements] + +### Design Source +| Source | Path | Version | +|--------|------|---------| +| Prototype code | [docs/ui-spec/assets/xxx/] | [commit SHA / tag] | + +## Prototype Management + +Prototype code is an **attachment** to this UI Spec. The canonical specification is always this document + the Design Doc. + +- **Attachment path**: [docs/ui-spec/assets/{feature-name}/] +- **Version identification**: [commit SHA / tag] +- **Compliance premise**: [e.g., design system compliance, component library usage] +- **Relationship to canonical spec**: Differences between prototype and this spec are resolved in favor of this document. Prototype serves as visual/behavioral reference only. + +## AC Traceability (Prototype) + +Map PRD acceptance criteria to prototype references. Skip this section if no prototype is provided. 
+ +| AC ID | AC Summary | Screen / State | Prototype Reference (element ID / path) | Adoption Decision | +|-------|-----------|----------------|----------------------------------------|-------------------| +| AC-001 | [EARS AC summary] | [Screen / state name] | [element or file reference] | Adopted / Not adopted / On hold | + +## Screen List and Transitions + +### Screen List + +| Screen ID | Screen Name | Description | Entry Condition | +|-----------|------------|-------------|-----------------| +| S-01 | [Screen name] | [Purpose] | [How user reaches this screen] | + +### Transition Conditions + +| Source | Destination | Trigger | Guard Condition | +|--------|------------|---------|-----------------| +| S-01 | S-02 | [User action] | [Precondition if any] | + +## Component Decomposition + +### Component Tree + +``` +[Page/Screen] + +-- [Container Component] + | +-- [Presentational Component A] + | +-- [Presentational Component B] + +-- [Container Component] + +-- [Presentational Component C] +``` + +### Component: [ComponentName] + +#### State x Display Matrix + +| State | Default | Loading | Empty | Error | Partial | +|-------|---------|---------|-------|-------|---------| +| Display | [Normal display] | [Specific pattern: e.g., Skeleton of `ExistingComponent` / Spinner from `ui/Spinner`] | [Empty state message + CTA: e.g., "No items yet" + `Button` "Create first item"] | [Error message + recovery: e.g., `Alert` variant="error" + `Button` "Retry"] | [Cached display + `Banner` "Connection lost, showing cached data"] | + +#### Interaction Definition + +| AC ID | EARS Condition | User Action | System Response | State Transition | Error Handling | +|-------|---------------|-------------|-----------------|-----------------|----------------| +| AC-001 | When [trigger] | [Click / input / etc.] 
| [Expected behavior] | [From state -> To state] | [Retry / Reset / Fallback] | + +### Component: [ComponentName2] + +[Repeat State x Display Matrix and Interaction Definition for each component] + +## Design Tokens and Component Map + +### Environment Constraints + +- Target browsers: [e.g., Chrome 120+, Safari 17+] +- Theme support: [e.g., light/dark, system preference] + +#### Responsive Behavior + +| Breakpoint | Width | Key Changes | +|-----------|-------|-------------| +| Mobile | [e.g., < 768px] | [e.g., single column, hamburger nav, 14px body text] | +| Tablet | [e.g., 768px - 1023px] | [e.g., 2-column grid, collapsed sidebar] | +| Desktop | [e.g., ≥ 1024px] | [e.g., full layout, expanded nav, sidebar visible] | + +### Existing Component Reuse Map + +| UI Element | Decision | Existing Component | Notes | +|-----------|----------|-------------------|-------| +| [Button] | Reuse | [components/ui/Button] | [No modifications needed] | +| [DataTable] | Extend | [components/ui/Table] | [Add sorting support] | +| [FeatureCard] | New | - | [No similar component exists] | + +### Design Tokens + +#### Color Roles + +| Role | Token | Value | Usage | +|------|-------|-------|-------| +| Background Surface | [bg-primary] | [e.g., #FFFFFF] | [Page background] | +| Background Surface | [bg-secondary] | [e.g., #F9FAFB] | [Card, section background] | +| Text | [text-primary] | [e.g., #111827] | [Headings, body text] | +| Text | [text-secondary] | [e.g., #6B7280] | [Captions, placeholders] | +| Brand / Accent | [color-brand] | [e.g., #1A73E8] | [Primary actions, links] | +| Status | [color-success] | [e.g., #22C55E] | [Success states, confirmations] | +| Status | [color-error] | [e.g., #EF4444] | [Error states, destructive actions] | +| Border | [border-primary] | [e.g., #E5E7EB] | [Card borders, dividers] | + +#### Typography Hierarchy + +| Role | Font | Size | Weight | Line Height | Letter Spacing | +|------|------|------|--------|-------------|----------------| +| Heading 
1 | [e.g., Inter] | [e.g., 30px] | [e.g., 700] | [e.g., 1.2] | [e.g., -0.02em] | +| Heading 2 | [e.g., Inter] | [e.g., 24px] | [e.g., 600] | [e.g., 1.3] | [e.g., -0.01em] | +| Body | [e.g., Inter] | [e.g., 16px] | [e.g., 400] | [e.g., 1.5] | [e.g., 0] | +| Caption | [e.g., Inter] | [e.g., 12px] | [e.g., 400] | [e.g., 1.4] | [e.g., 0.01em] | +| Monospace | [e.g., JetBrains Mono] | [e.g., 14px] | [e.g., 400] | [e.g., 1.6] | [e.g., 0] | + +#### Spacing Scale + +| Token | Value | Usage | +|-------|-------|-------| +| [spacing-xs] | [e.g., 4px] | [Inline element gaps] | +| [spacing-sm] | [e.g., 8px] | [Compact padding] | +| [spacing-md] | [e.g., 16px] | [Default component padding] | +| [spacing-lg] | [e.g., 24px] | [Section spacing] | +| [spacing-xl] | [e.g., 40px] | [Page section separation] | + +#### Elevation (Depth) + +| Level | Treatment | Usage | +|-------|-----------|-------| +| 0 (Flat) | [e.g., none] | [Inline elements, text] | +| 1 (Raised) | [e.g., 0 1px 2px rgba(0,0,0,0.05)] | [Cards, buttons] | +| 2 (Floating) | [e.g., 0 4px 12px rgba(0,0,0,0.1)] | [Dropdowns, popovers] | +| 3 (Overlay) | [e.g., 0 8px 24px rgba(0,0,0,0.15)] | [Modals, dialogs] | + +#### Border Radius Scale + +| Token | Value | Usage | +|-------|-------|-------| +| [radius-sm] | [e.g., 4px] | [Badges, chips] | +| [radius-md] | [e.g., 8px] | [Cards, inputs] | +| [radius-lg] | [e.g., 12px] | [Modals, panels] | +| [radius-full] | [e.g., 9999px] | [Avatars, pills] | + +## Visual Acceptance + +### Golden States +Define the key visual states that serve as acceptance benchmarks: + +1. **[State name]**: [Description of what should be visually confirmed] +2. 
**[State name]**: [Description] + +### Layout Constraints +- [Min/max width, height constraints] +- [Spacing rules between components] +- [Overflow behavior] + +## Accessibility Requirements + +### Keyboard Navigation + +| Component | Tab Order | Key Binding | Behavior | +|-----------|-----------|-------------|----------| +| [Component] | [Order number] | [Enter / Space / Arrow] | [Expected behavior] | + +### Screen Reader + +| Component | Role | Accessible Name | Live Region | +|-----------|------|-----------------|-------------| +| [Component] | [ARIA role] | [aria-label / aria-labelledby] | [polite / assertive / none] | + +### Contrast Requirements + +| Element | Foreground | Background | Ratio Target | +|---------|-----------|------------|-------------| +| [Text element] | [Color] | [Color] | [4.5:1 for normal text / 3:1 for large text] | + +## Open Items + +| ID | Description | Owner | Deadline | +|----|-------------|-------|----------| +| TBD-01 | [Unresolved question or decision] | [Who resolves] | [Target date] | + +*All TBDs must have an owner and deadline. Resolve before Design Doc creation.* + +## Update History + +| Date | Version | Changes | Author | +|------|---------|---------|--------| +| YYYY-MM-DD | 1.0 | Initial version | [Name] | diff --git a/dev-skills/skills/frontend-ai-guide/SKILL.md b/dev-skills/skills/frontend-ai-guide/SKILL.md new file mode 100644 index 0000000..8a05065 --- /dev/null +++ b/dev-skills/skills/frontend-ai-guide/SKILL.md @@ -0,0 +1,250 @@ +--- +name: frontend-ai-guide +description: Frontend-specific technical decision criteria, anti-patterns, debugging techniques, and quality check workflow. Use when making frontend technical decisions or performing quality assurance. 
+--- + +# AI Developer Guide - Technical Decision Criteria and Anti-pattern Collection (Frontend) + +## Technical Anti-patterns (Red Flag Patterns) + +Immediately stop and reconsider design when detecting the following patterns: + +### Code Quality Anti-patterns +1. **Writing similar code 3 or more times** - Violates Rule of Three +2. **Multiple responsibilities mixed in a single component** - Violates Single Responsibility Principle (SRP) +3. **Defining same content in multiple components** - Violates DRY principle +4. **Making changes without checking dependencies** - Potential for unexpected impacts +5. **Disabling code with comments** - Should use version control +6. **Error suppression** - Hiding problems creates technical debt +7. **Excessive use of type assertions (as)** - Abandoning type safety +8. **Prop drilling through 3+ levels** - Should use Context API or state management +9. **Massive components (300+ lines)** - Split into smaller components + +### Design Anti-patterns +- **"Make it work for now" thinking** - Accumulation of technical debt +- **Patchwork implementation** - Unplanned additions to existing components +- **Optimistic implementation of uncertain technology** - Designing unknown elements assuming "it'll probably work" +- **Symptomatic fixes** - Surface-level fixes that don't solve root causes +- **Unplanned large-scale changes** - Lack of incremental approach + +## Fallback Design Principles + +### Core Principle: Fail-Fast +Design philosophy that prioritizes improving primary code reliability over fallback implementations. 
+
+### Criteria for Fallback Implementation
+- **Fallback rule**: Implement fallbacks only when explicitly defined in Design Doc
+- **Layer Responsibilities**:
+  - Component Layer: Use Error Boundary for error handling
+  - Hook Layer: Implement decisions based on business requirements
+
+### Detection of Excessive Fallbacks
+- Require design review when writing the 3rd catch statement in the same feature
+- Verify Design Doc definition before implementing fallbacks
+- Properly log errors and make failures explicit
+
+## Rule of Three - Criteria for Code Duplication
+
+How to handle duplicate code based on Martin Fowler's "Refactoring":
+
+| Duplication Count | Action | Reason |
+|-------------------|--------|--------|
+| 1st time | Inline implementation | Cannot predict future changes |
+| 2nd time | Consider future consolidation | Pattern beginning to emerge |
+| 3rd time | Implement commonalization | Pattern established |
+
+### Criteria for Commonalization
+
+**Cases for Commonalization**
+- Business logic duplication
+- Complex processing algorithms
+- Component patterns (form fields, cards, etc.)
+- Custom hooks
+- Validation rules
+
+**Cases to Avoid Commonalization**
+- Accidental matches (coincidentally same code)
+- Possibility of evolving in different directions
+- Significant readability decrease from commonalization
+- Simple helpers in test code
+
+### Implementation Example
+```typescript
+// 1st and 2nd duplication: keep inline implementations (do not commonalize yet)
+function UserEmailInput() { /* ... */ }
+function ContactEmailInput() { /* ... */ }
+
+// Commonalize on 3rd occurrence
+function EmailInput({ context }: { context: 'user' | 'contact' | 'admin' }) { /* ... 
*/ } +``` + +## Common Failure Patterns and Avoidance Methods + +### Pattern 1: Error Fix Chain +**Symptom**: Fixing one error causes new errors +**Cause**: Surface-level fixes without understanding root cause +**Avoidance**: Identify root cause with 5 Whys before fixing + +### Pattern 2: Abandoning Type Safety +**Symptom**: Excessive use of any type or as +**Cause**: Impulse to avoid type errors +**Avoidance**: Handle safely with unknown type and type guards + +### Pattern 3: Implementation Without Sufficient Testing +**Symptom**: Many bugs after implementation +**Cause**: Ignoring Red-Green-Refactor process +**Avoidance**: Always start with failing tests + +### Pattern 4: Ignoring Technical Uncertainty +**Symptom**: Frequent unexpected errors when introducing new technology +**Cause**: Assuming "it should work according to official documentation" without prior investigation +**Avoidance**: +- Record certainty evaluation at the beginning of task files + ``` + Certainty: low (Reason: new experimental feature with limited production examples) + Exploratory implementation: true + Fallback: use established patterns + ``` +- For low certainty cases, create minimal verification code first + +### Pattern 5: Insufficient Existing Code Investigation +**Symptom**: Duplicate implementations, architecture inconsistency, integration failures +**Cause**: Insufficient understanding of existing code before implementation +**Avoidance Methods**: +- Before implementation, always search for similar functionality (using domain, responsibility, component patterns as keywords) +- Similar functionality found → Use that implementation (do not create new implementation) +- Similar functionality is technical debt → Create ADR improvement proposal before implementation +- No similar functionality exists → Implement new functionality following existing design philosophy +- Record all decisions and rationale in "Existing Codebase Analysis" section of Design Doc + +## Debugging Techniques + 
+### 1. Error Analysis Procedure +1. Read error message (first line) accurately +2. Focus on first and last of stack trace +3. Identify first line where your code appears +4. Check React DevTools for component hierarchy + +### 2. 5 Whys - Root Cause Analysis +``` +Symptom: Component not rendering +Why1: Props are undefined → Why2: Parent component didn't pass props +Why3: Parent using old prop names → Why4: Component interface was updated +Why5: No update to parent after refactoring +Root cause: Incomplete refactoring, missing call-site updates +``` + +### 3. Minimal Reproduction Code +To isolate problems, attempt reproduction with minimal code: +- Remove unrelated components +- Replace API calls with mocks +- Create minimal configuration that reproduces problem +- Use React DevTools to inspect component tree + +### 4. Debug Log Output +```typescript +console.log('DEBUG:', { + context: 'user-form-submission', + props: { email, name }, + state: currentState, + timestamp: new Date().toISOString() +}) +``` + +## Quality Check Workflow + +Use the appropriate run command based on the `packageManager` field in package.json. 
+ +### Build Commands +- `dev` - Development server +- `build` - Production build +- `preview` - Preview production build +- `type-check` - Type check (no emit) + +### Quality Check Phases + +**Phase 1-3: Basic Checks** +- `check` - Biome (lint + format) +- `build` - TypeScript build + +**Phase 4-5: Tests and Final Confirmation** +- `test` - Test execution +- `test:coverage:fresh` - Coverage measurement (fresh cache) +- `check:all` - Overall integrated check + +### Auxiliary Commands +- `test:coverage` - Run tests with coverage +- `test:safe` - Safe test execution (with auto cleanup) +- `cleanup:processes` - Cleanup Vitest processes +- `format` - Format fixes +- `lint:fix` - Lint fixes +- `open coverage/index.html` - Check coverage report + +### Troubleshooting +- **Port in use error**: Run `cleanup:processes` script +- **Cache issues**: Run `test:coverage:fresh` script +- **Dependency errors**: Clean reinstall dependencies +- **Vite preview not starting**: Check port 4173 availability + +## Situations Requiring Technical Decisions + +### Timing of Abstraction +- Extract patterns after writing concrete implementation 3 times +- Be conscious of YAGNI, implement only currently needed features +- Prioritize current simplicity over future extensibility + +### Performance vs Readability +- Prioritize readability unless React DevTools Profiler identifies a measurable bottleneck (e.g., render time exceeding 16ms, unnecessary re-renders) +- Measure before optimizing with React DevTools Profiler +- Document reason with comments when optimizing + +### Granularity of Component/Type Definitions +- Overly detailed components/types reduce maintainability +- Design components that appropriately express UI patterns +- Use composition over inheritance + +## Implementation Completeness Assurance + +### Required Procedure for Impact Analysis + +**Completion Criteria**: Complete all 3 stages + +#### 1. 
Discovery +```bash +Grep -n "ComponentName\|hookName" -o content +Grep -n "importedFunction" -o content +Grep -n "propsType\|StateType" -o content +``` + +#### 2. Understanding +**Mandatory**: Read all discovered files and include necessary parts in context: +- Caller's purpose and context +- Component hierarchy +- Data flow: Props → State → Event handlers → Callbacks + +#### 3. Identification +Structured impact report (mandatory): +``` +## Impact Analysis +### Direct Impact: ComponentA, ComponentB (with reasons) +### Indirect Impact: FeatureX, PageY (with integration paths) +### Processing Flow: Props → Render → Events → Callbacks +``` + +**Important**: Execute all 3 stages to completion + +### Unused Code Deletion Rule + +When unused code is detected → Will it be used? +- Yes → Implement immediately (no deferral allowed) +- No → Delete immediately (remains in Git history) + +Target: Components, hooks, utilities, documentation, configuration files + +### Existing Code Deletion Decision Flow + +``` +In use? No → Delete immediately (remains in Git history) + Yes → Working? No → Delete + Reimplement + Yes → Fix +``` \ No newline at end of file diff --git a/dev-skills/skills/implementation-approach/SKILL.md b/dev-skills/skills/implementation-approach/SKILL.md new file mode 100644 index 0000000..7585656 --- /dev/null +++ b/dev-skills/skills/implementation-approach/SKILL.md @@ -0,0 +1,144 @@ +--- +name: implementation-approach +description: Implementation strategy selection framework. Use when planning implementation strategy, selecting development approach, or defining verification criteria. +--- + +# Implementation Strategy Selection Framework (Meta-cognitive Approach) + +## Meta-cognitive Strategy Selection Process + +### Phase 1: Comprehensive Current State Analysis + +**Core Question**: "What does the existing implementation look like?" 
+ +#### Analysis Framework +```yaml +Architecture Analysis: Responsibility separation, data flow, dependencies, technical debt +Implementation Quality Assessment: Code quality, test coverage, performance, security +Historical Context Understanding: Current form rationale, past decision validity, constraint changes, requirement evolution +``` + +#### Meta-cognitive Question List +- What is the true responsibility of this implementation? +- Which parts are business essence and which derive from technical constraints? +- What dependencies or implicit preconditions are unclear from the code? +- What benefits and constraints does the current design bring? + +### Phase 2: Strategy Exploration and Creation + +**Core Question**: "When determining before → after, what implementation patterns or strategies should be referenced?" + +#### Strategy Discovery Process +```yaml +Research and Exploration: Tech stack examples (WebSearch), similar projects, OSS references, literature/blogs +Creative Thinking: Strategy combinations, constraint-based design, phase division, extension point design +``` + +#### Reference Strategy Patterns (Creative Combinations Encouraged) + +**Legacy Handling Strategies**: +- Strangler Pattern: Gradual migration through phased replacement +- Facade Pattern: Complexity hiding through unified interface +- Adapter Pattern: Bridge with existing systems + +**New Development Strategies**: +- Feature-driven Development: Vertical implementation prioritizing user value +- Foundation-driven Development: Foundation-first construction prioritizing stability +- Risk-driven Development: Prioritize addressing maximum risk elements + +**Integration/Migration Strategies**: +- Proxy Pattern: Transparent feature extension +- Decorator Pattern: Phased enhancement of existing features +- Bridge Pattern: Flexibility through abstraction + +**Important**: The optimal solution is discovered through creative thinking according to each project's context. 
+ +### Phase 3: Risk Assessment and Control + +**Core Question**: "What risks arise when applying this to existing implementation, and what's the best way to control them?" + +#### Risk Analysis Matrix +```yaml +Technical Risks: System impact, data consistency, performance degradation, integration complexity +Operational Risks: Service availability, deployment downtime, process changes, rollback procedures +Project Risks: Schedule delays, learning costs, quality achievement, team coordination +``` + +#### Risk Control Strategies +```yaml +Preventive Measures: Phased migration, parallel operation verification, integration/regression tests, monitoring setup +Incident Response: Rollback procedures, log/metrics preparation, communication system, service continuation procedures +``` + +### Phase 4: Constraint Compatibility Verification + +**Core Question**: "What are this project's constraints?" + +#### Constraint Checklist +```yaml +Technical Constraints: Library compatibility, resource capacity, mandatory requirements, numerical targets +Temporal Constraints: Deadlines/priorities, dependencies, milestones, learning periods +Resource Constraints: Team/skills, work hours/systems, budget, external contracts +Business Constraints: Market launch timing, customer impact, regulatory compliance +``` + +### Phase 5: Implementation Approach Decision + +Select optimal solution from basic implementation approaches (creative combinations encouraged): + +#### Vertical Slice (Feature-driven) +**Characteristics**: Vertical implementation across all layers by feature unit +**Application Conditions**: Features share fewer than 2 data models, each feature is independently deliverable, changes touch 3+ architecture layers +**Verification Method**: End-user value delivery at each feature completion + +#### Horizontal Slice (Foundation-driven) +**Characteristics**: Phased construction by architecture layer +**Application Conditions**: 3+ features depend on a common foundation layer, 
foundation changes require stability verification before consumers can proceed +**Verification Method**: Integrated operation verification when all foundation layers complete + +#### Hybrid (Creative Combination) +**Characteristics**: Flexible combination according to project characteristics +**Application Conditions**: Unclear requirements, need to change approach per phase, transition from prototyping to full implementation +**Verification Method**: Verify at appropriate L1/L2/L3 levels according to each phase's goals + +### Phase 6: Decision Rationale Documentation + +**Design Doc Documentation**: Record in the Design Doc's implementation approach section: +1. Selected strategy name and characteristics +2. Alternatives considered and reason for rejection +3. Risk mitigation plan (from Phase 3) +4. Constraint compliance summary (from Phase 4) +5. Verification level (L1/L2/L3) and integration point definition + +## Verification Level Definitions + +Priority for completion verification of each task: + +- **L1: Functional Operation Verification** - Operates as end-user feature (e.g., search executable) +- **L2: Test Operation Verification** - New tests added and passing +- **L3: Build Success Verification** - Code builds/runs without errors + +**Priority**: L1 > L2 > L3 in order of verifiability importance + +## Integration Point Definitions + +Define integration points according to selected strategy: +- **Strangler-based**: When switching between old and new systems for each feature +- **Feature-driven**: When users can actually use the feature +- **Foundation-driven**: When all architecture layers are ready and E2E tests pass +- **Hybrid**: When individual goals defined for each phase are achieved + +## Quality Checks + +1. Verify at least one strategy combination beyond listed patterns was considered +2. Confirm Phase 1 analysis framework is complete before selecting strategy +3. Confirm Phase 3 risk analysis matrix is populated before implementation starts +4. 
Confirm Phase 4 constraint checklist is reviewed before strategy decision +5. Confirm Phase 6 documentation template is filled with selection rationale + +## Guidelines for Meta-cognitive Execution + +1. **Leverage Known Patterns**: Use as starting point, explore creative combinations +2. **Active WebSearch Use**: Research implementation examples from similar tech stacks +3. **Apply 5 Whys**: Pursue root causes to grasp essence +4. **Multi-perspective Evaluation**: Comprehensively evaluate from each Phase 1-4 perspective \ No newline at end of file diff --git a/dev-skills/skills/integration-e2e-testing/SKILL.md b/dev-skills/skills/integration-e2e-testing/SKILL.md new file mode 100644 index 0000000..6ad9889 --- /dev/null +++ b/dev-skills/skills/integration-e2e-testing/SKILL.md @@ -0,0 +1,154 @@ +--- +name: integration-e2e-testing +description: Integration and E2E test design principles, ROI calculation, test skeleton specification, and review criteria. Use when designing integration tests, E2E tests, or reviewing test quality. +--- + +# Integration and E2E Testing Principles + +## References + +**E2E test design with Playwright**: See [references/e2e-design.md](references/e2e-design.md) for UI Spec-driven E2E test candidate selection and Playwright test architecture. 
+ +## Test Type Definition and Limits + +| Test Type | Purpose | Scope | Limit per Feature | Implementation Timing | +|-----------|---------|-------|-------------------|----------------------| +| Integration | Verify component interactions | Partial system integration | MAX 3 | Created alongside implementation | +| E2E | Verify critical user journeys | Full system | MAX 1-2 | Executed in final phase only | + +## Behavior-First Principle + +### Include (High ROI) +- Business logic correctness (calculations, state transitions, data transformations) +- Data integrity and persistence behavior +- User-visible functionality completeness +- Error handling behavior (what user sees/experiences) + +### Redirect to Other Test Types +- External service connections → Verify via contract/interface tests +- Performance metrics → Verify via dedicated load testing +- Implementation details → Verify observable behavior instead +- UI layout specifics → Verify information availability instead + +**Principle**: Test = User-observable behavior verifiable in isolated CI environment + +## ROI Calculation + +ROI is used to **rank candidates within the same test type** (integration candidates against each other, E2E candidates against each other). Cross-type comparison is unnecessary because integration and E2E budgets are selected independently. + +``` +ROI Score = Business Value × User Frequency + Legal Requirement × 10 + Defect Detection + (range: 0–120) +``` + +Higher ROI Score = higher priority within its test type. No normalization or capping is applied — the raw score is used directly for ranking. Deduplication is a separate step that removes candidates entirely; it does not modify scores. + +### ROI Threshold for E2E + +E2E tests have high ownership cost (creation, execution, and maintenance are each 3-10× higher than integration tests). To justify creation, an E2E candidate (beyond the must-keep reserved slot) requires **ROI Score > 50**. 
+ +### ROI Calculation Examples + +| Scenario | BV | Freq | Legal | Defect | ROI Score | Test Type | Selection Outcome | +|----------|----|------|-------|--------|-----------|-----------|-------------------| +| Core checkout flow | 10 | 9 | true | 9 | 109 | E2E | Selected (reserved slot: user-facing multi-step journey) | +| Payment error handling | 8 | 3 | false | 7 | 31 | E2E | Below threshold (31 < 50), not selected | +| Profile save flow | 7 | 6 | false | 6 | 48 | E2E | Below threshold (48 < 50), not selected | +| DB persistence check | 8 | 8 | false | 8 | 72 | Integration | Selected (rank 1 of 3) | +| Error message display | 5 | 3 | false | 4 | 19 | Integration | Selected (rank 2 of 3) | +| Optional filter toggle | 3 | 4 | false | 2 | 14 | Integration | Not selected (rank 4, budget full) | + +## Multi-Step User Journey Definition + +A feature qualifies as containing a **multi-step user journey** when ALL of the following are true: + +1. **2+ distinct interaction boundaries** are traversed in sequence to complete a user goal. What counts as a boundary depends on the system type: + - Web: distinct routes/pages + - Mobile native: distinct screens/views + - CLI: distinct command invocations or interactive prompts + - API: distinct API calls forming a transaction (e.g., create → confirm → finalize) +2. **State carries across steps** — data produced or actions taken in one step affect what the next step accepts or displays +3. 
**The journey has a completion point** — a final state the user or caller reaches (e.g., confirmation page, saved record, API success response, completed workflow) + +### User-Facing vs Service-Internal Journeys + +Multi-step journeys are further classified for E2E budget decisions: + +| Classification | Condition | E2E Reserved Slot | Example | +|---|---|---|---| +| **User-facing** | A human user directly triggers and observes the steps (via UI, CLI, or direct API interaction) | Eligible | Web checkout flow, CLI setup wizard, mobile onboarding | +| **Service-internal** | Steps are triggered by backend services without direct user interaction | Not eligible (use integration tests) | Async job pipeline, service-to-service saga, scheduled batch processing | + +This classification applies only to the reserved E2E slot and the E2E Gap Check. Service-internal journeys are still valid E2E candidates through the normal ROI > 50 path if they warrant full-system verification. + +Use this definition when evaluating E2E test candidates and E2E gap detection. + +## Test Skeleton Specification + +### Required Comment Patterns + +Each test MUST include the following annotations: + +``` +AC: [Original acceptance criteria text] +Behavior: [Trigger] → [Process] → [Observable Result] +@category: core-functionality | integration | edge-case | e2e +@dependency: none | [component names] | full-system +@complexity: low | medium | high +ROI: [score] +``` + +Use the project's comment syntax to wrap these annotations (e.g., `//` for C-family, `#` for Python/Ruby/Shell). 
+ +### Verification Items (Optional) + +When verification points need explicit enumeration: +``` +Verification items: +- [Item 1] +- [Item 2] +``` + +## EARS Format Mapping + +| EARS Keyword | Test Type | Generation Approach | +|--------------|-----------|---------------------| +| **When** | Event-driven | Trigger event → verify outcome | +| **While** | State condition | Setup state → verify behavior | +| **If-then** | Branch coverage | Both condition paths verified | +| (none) | Basic functionality | Direct invocation → verify result | + +## Test File Naming Convention + +- Integration tests: `*.int.test.*` or `*.integration.test.*` +- E2E tests: `*.e2e.test.*` + +The test runner or framework in the project determines the appropriate file extension. + +## Review Criteria + +### Skeleton and Implementation Consistency + +| Check | Failure Condition | +|-------|-------------------| +| Behavior Verification | No assertion for "observable result" in skeleton | +| Verification Item Coverage | Listed items not all covered by assertions | +| Mock Boundary | Internal components mocked in integration test | + +### Implementation Quality + +| Check | Failure Condition | +|-------|-------------------| +| AAA Structure | Arrange/Act/Assert separation unclear | +| Independence | State sharing between tests, order dependency | +| Reproducibility | Date/random dependency, varying results | +| Readability | Test name doesn't match verification content | + +## Quality Standards + +### Required +- Each test verifies one behavior +- Clear AAA (Arrange-Act-Assert) structure +- No test interdependencies +- Deterministic execution + diff --git a/dev-skills/skills/integration-e2e-testing/references/e2e-design.md b/dev-skills/skills/integration-e2e-testing/references/e2e-design.md new file mode 100644 index 0000000..f4e9e90 --- /dev/null +++ b/dev-skills/skills/integration-e2e-testing/references/e2e-design.md @@ -0,0 +1,86 @@ +# E2E Test Design with Playwright + +## When to Create E2E 
Tests + +E2E tests target **critical user journeys** that span multiple pages or require real browser interaction. Apply the same ROI framework from the parent skill — only create E2E tests when ROI > 50. + +### Candidate Sources + +| Source | What to Extract | +|--------|----------------| +| **Design Doc ACs** | User journeys with EARS "When" keyword spanning multiple screens | +| **UI Spec Screen Transitions** | Multi-step flows (e.g., form wizard, checkout) | +| **UI Spec State x Display Matrix** | Error/empty/loading states requiring browser-level verification | +| **UI Spec Interaction Definitions** | Complex interactions (drag-drop, keyboard navigation, responsive behavior) | + +### Selection Criteria + +**Include** (high E2E ROI): +- Multi-page user journeys (login → dashboard → action → confirmation) +- Flows requiring real browser APIs (navigation, cookies, localStorage) +- Accessibility verification requiring actual DOM rendering +- Responsive behavior across viewports + +**Use integration tests instead when**: +- Testing single-component state changes → RTL +- Testing API response handling → MSW + RTL +- Testing pure data transformations → unit tests + +## UI Spec to E2E Test Mapping + +When a UI Spec exists, use it as the primary source for E2E test design: + +1. **Extract screen transitions** → Each multi-step transition = 1 E2E candidate +2. **Check state x display matrix** → Error states requiring navigation = E2E candidate +3. **Review interaction definitions** → Browser-dependent interactions = E2E candidate +4. 
**Cross-reference with Design Doc ACs** → Ensure E2E candidates map to acceptance criteria + +### Mapping Template + +``` +Screen Transition: [Screen A] → [Screen B] → [Screen C] +AC Reference: AC-{id} +User Journey: [Description of what the user accomplishes] +Preconditions: [Auth state, data state] +Verification Points: + - [What to assert at each step] +E2E ROI Score: [calculated score] +``` + +## Playwright Test Architecture + +### Page Object Pattern + +Organize browser interactions through page objects for maintainability: + +``` +tests/ +├── e2e/ +│ ├── pages/ # Page objects +│ ├── fixtures/ # Test fixtures and helpers +│ └── *.e2e.test.ts # Test files +``` + +### Test Isolation + +- Each test starts from a clean browser context +- No shared state between tests +- Use `beforeEach` for common setup (auth, navigation) +- Prefer `page.goto()` over in-test navigation for setup + +### Viewport Testing + +When UI Spec defines responsive behavior, test critical breakpoints: + +| Breakpoint | Width | When to Test | +|-----------|-------|-------------| +| Mobile | 375px | If UI Spec defines mobile-specific interactions | +| Tablet | 768px | If UI Spec defines tablet layout differences | +| Desktop | 1280px | Default — always test | + +## Budget Enforcement + +Hard limits per feature (same as parent skill): +- **E2E Tests**: MAX 1-2 tests +- Only generate if ROI score > 50 +- Prefer fewer, comprehensive journey tests over many granular tests diff --git a/dev-skills/skills/test-implement/SKILL.md b/dev-skills/skills/test-implement/SKILL.md new file mode 100644 index 0000000..ce67d77 --- /dev/null +++ b/dev-skills/skills/test-implement/SKILL.md @@ -0,0 +1,30 @@ +--- +name: test-implement +description: Test implementation patterns and conventions. Use when implementing unit tests, integration tests, or E2E tests, including RTL+Vitest+MSW component testing and Playwright E2E testing. 
+--- + +# Test Implementation Patterns + +## Reference Selection + +| Test Type | Reference | When to Use | +|-----------|-----------|-------------| +| **Unit / Integration** | [references/frontend.md](references/frontend.md) | Implementing React component tests with RTL + Vitest + MSW | +| **E2E** | [references/e2e.md](references/e2e.md) | Implementing browser-level E2E tests with Playwright | + +## Common Principles + +### AAA Structure +All tests follow **Arrange-Act-Assert**: +- **Arrange**: Set up preconditions and inputs +- **Act**: Execute the behavior under test +- **Assert**: Verify the expected outcome + +### Test Independence +- Each test runs independently without depending on other tests +- No shared mutable state between tests +- Deterministic execution — no random or time dependencies without mocking + +### Naming +- Test names describe expected behavior from user perspective +- One test verifies one behavior diff --git a/dev-skills/skills/test-implement/references/e2e.md b/dev-skills/skills/test-implement/references/e2e.md new file mode 100644 index 0000000..573f765 --- /dev/null +++ b/dev-skills/skills/test-implement/references/e2e.md @@ -0,0 +1,252 @@ +# E2E Test Implementation with Playwright + +## Test Framework +- **Playwright Test**: `@playwright/test` +- Test imports: `import { test, expect } from '@playwright/test'` + +## Test Structure + +### Directory Layout +``` +tests/ +└── e2e/ + ├── pages/ # Page objects + │ ├── login.page.ts + │ └── dashboard.page.ts + ├── fixtures/ # Test fixtures + │ └── auth.fixture.ts + └── *.e2e.test.ts # Test files +``` + +### Naming Conventions +- Test files: `{FeatureName}.e2e.test.ts` +- Page objects: `{PageName}.page.ts` +- Fixtures: `{Purpose}.fixture.ts` + +## Page Object Pattern + +Encapsulate page interactions for reusability and maintainability: + +```typescript +import { type Page, type Locator } from '@playwright/test' + +export class LoginPage { + readonly emailInput: Locator + readonly 
passwordInput: Locator + readonly submitButton: Locator + + constructor(private page: Page) { + this.emailInput = page.getByLabel('Email') + this.passwordInput = page.getByLabel('Password') + this.submitButton = page.getByRole('button', { name: 'Sign in' }) + } + + async login(email: string, password: string) { + await this.emailInput.fill(email) + await this.passwordInput.fill(password) + await this.submitButton.click() + } +} +``` + +## Test Patterns + +### Basic Test +```typescript +import { test, expect } from '@playwright/test' + +test('user can navigate to dashboard after login', async ({ page }) => { + // Arrange + await page.goto('/login') + + // Act + await page.getByLabel('Email').fill('user@example.com') + await page.getByLabel('Password').fill('password') + await page.getByRole('button', { name: 'Sign in' }).click() + + // Assert + await expect(page).toHaveURL('/dashboard') + await expect(page.getByRole('heading', { name: 'Dashboard' })).toBeVisible() +}) +``` + +### With Page Objects +```typescript +import { test, expect } from '@playwright/test' +import { LoginPage } from './pages/login.page' +import { DashboardPage } from './pages/dashboard.page' + +test('user completes purchase flow', async ({ page }) => { + const loginPage = new LoginPage(page) + const dashboardPage = new DashboardPage(page) + + await page.goto('/login') + await loginPage.login('user@example.com', 'password') + await expect(dashboardPage.heading).toBeVisible() +}) +``` + +### Auth Fixture +```typescript +import { test as base } from '@playwright/test' + +export const test = base.extend<{ authenticatedPage: Page }>({ + authenticatedPage: async ({ page }, use) => { + await page.goto('/login') + await page.getByLabel('Email').fill('user@example.com') + await page.getByLabel('Password').fill('password') + await page.getByRole('button', { name: 'Sign in' }).click() + await page.waitForURL('/dashboard') + await use(page) + }, +}) +``` + +## E2E Environment Prerequisites + +E2E tests 
require a running application with real data state. Unlike unit/integration tests, environment setup is part of E2E test implementation scope. + +### Seed Data Strategy + +Prepare test data via API calls or database seeding: + +```typescript +// fixtures/seed.fixture.ts +import { test as base } from '@playwright/test' + +export const test = base.extend<{ seededData: SeedResult }>({ + seededData: async ({ request }, use) => { + // Arrange: Create test data via API before test + // Example: adjust to the project's actual seeding mechanism + const result = await request.post('/api/test/seed', { + data: { scenario: 'e2e-user-with-subscription' } + }) + const seedData = await result.json() + + await use(seedData) + + // Cleanup: Remove test data after test + await request.delete(`/api/test/seed/${seedData.id}`) + }, +}) +``` + +**Principles**: +- Use the application's existing seeding mechanism if present; create new seed endpoints only when no alternative exists +- Seed data setup belongs to test fixtures, not to a separate manual step +- Each test must be self-contained: create its own data, clean up after +- Seed data via API endpoints or direct DB access only + +### Authentication Fixture + +Implement auth fixtures that match the application's actual login flow: + +```typescript +// fixtures/auth.fixture.ts +export const test = base.extend<{ playerPage: Page }>({ + playerPage: async ({ page, request }, use) => { + // Use the application's existing auth endpoint — not admin backdoors + // Example: adjust the URL and payload to match the project's actual login flow + await request.post('/api/login', { + data: { loginId: E2E_LOGIN_ID, password: E2E_PASSWORD } + }) + // Transfer session to browser context + await page.goto('/') + await use(page) + }, +}) +``` + +**Principles**: +- Use the application's existing authentication flow; auth fixtures must follow the same path that real users use +- Use the application's production authentication flow for E2E auth (the same 
endpoints real users hit) +- Store test credentials in environment variables only (`E2E_*` prefixed) +- If the auth flow requires specific user records, seed them in the fixture + +### Environment Checklist + +Before E2E tests can pass, verify: +- [ ] Application is running and accessible at `baseURL` +- [ ] Database has required seed data (test users, subscriptions, content) +- [ ] Authentication flow works with test credentials +- [ ] Environment variables are set (`E2E_*` prefixed) +- [ ] External services are either available or mocked via `page.route()` + +When the work plan includes dedicated environment setup tasks (Phase 0), follow those tasks. When no setup tasks exist in the plan, address missing prerequisites as part of the E2E test implementation task itself. + +## Locator Strategy + +Prefer accessible locators in this order: +1. `page.getByRole()` — best for accessibility +2. `page.getByLabel()` — form elements +3. `page.getByText()` — visible text +4. `page.getByTestId()` — last resort + +```typescript +await page.getByRole('button', { name: 'Submit' }).click() +``` + +## Assertions + +```typescript +// Visibility +await expect(page.getByText('Success')).toBeVisible() +await expect(page.getByText('Error')).not.toBeVisible() + +// Navigation +await expect(page).toHaveURL('/dashboard') +await expect(page).toHaveTitle('Dashboard') + +// Element state +await expect(page.getByRole('button')).toBeEnabled() +await expect(page.getByRole('button')).toBeDisabled() + +// Content +await expect(page.getByRole('heading')).toHaveText('Welcome') +``` + +## Viewport Testing + +When UI Spec defines responsive behavior: + +```typescript +test.describe('responsive navigation', () => { + test('shows hamburger menu on mobile', async ({ page }) => { + await page.setViewportSize({ width: 375, height: 667 }) + await page.goto('/') + await expect(page.getByRole('button', { name: 'Menu' })).toBeVisible() + await expect(page.getByRole('navigation')).not.toBeVisible() + }) + + 
test('shows full navigation on desktop', async ({ page }) => { + await page.setViewportSize({ width: 1280, height: 720 }) + await page.goto('/') + await expect(page.getByRole('navigation')).toBeVisible() + }) +}) +``` + +## Test Isolation + +- Each test starts from a clean browser context +- No shared state between tests +- Use `beforeEach` for common setup (auth, navigation) +- Prefer `page.goto()` over in-test navigation for setup steps + +## Skeleton Comment Format + +E2E test skeletons follow the same annotation format as integration tests (adapt comment syntax to the project's language): + +```typescript +// AC: [Original acceptance criteria text] +// Behavior: [User action] → [System response] → [Observable result] +// @category: e2e +// @dependency: full-system +// @complexity: high +// ROI: [score] +test('AC1: [Description]', async ({ page }) => { + // Arrange: [Setup description] + // Act: [Action description] + // Assert: [Verification description] +}) +``` diff --git a/dev-skills/skills/test-implement/references/frontend.md b/dev-skills/skills/test-implement/references/frontend.md new file mode 100644 index 0000000..e605e28 --- /dev/null +++ b/dev-skills/skills/test-implement/references/frontend.md @@ -0,0 +1,217 @@ +# Frontend Test Implementation (RTL + Vitest + MSW) + +## Test Framework +- **Vitest**: This project uses Vitest +- **React Testing Library**: For component testing +- **MSW (Mock Service Worker)**: For API mocking +- Test imports: `import { describe, it, expect, beforeEach, vi } from 'vitest'` +- Component test imports: `import { render, screen } from '@testing-library/react'` +- User interaction: `import userEvent from '@testing-library/user-event'` (prefer over `fireEvent`) +- Mock creation: Use `vi.mock()` + +## Basic Testing Policy + +### Quality Requirements +- **Coverage**: Unit test coverage must be 60% or higher (Frontend standard 2025) +- **Independence**: Each test can run independently without depending on other tests +- 
**Reproducibility**: Tests are environment-independent and always return the same results +- **Readability**: Test code maintains the same quality as production code + +### Coverage Requirements (ADR-0002 Compliant) +**Component-specific targets**: +- Atoms (Button, Text, etc.): 70% or higher +- Molecules (FormField, etc.): 65% or higher +- Organisms (Header, Footer, etc.): 60% or higher +- Custom Hooks: 65% or higher +- Utils: 70% or higher + +**Metrics**: Statements, Branches, Functions, Lines + +### Test Types and Scope +1. **Unit Tests (React Testing Library)** + - Verify behavior of individual components or functions + - Mock all external dependencies + - Most numerous, implemented with fine granularity + - Focus on user-observable behavior + +2. **Integration Tests (React Testing Library + MSW)** + - Verify coordination between multiple components + - Mock APIs with MSW (Mock Service Worker) + - No actual DB connections (backend manages DB) + - Verify major functional flows + +## Red-Green-Refactor Process (Test-First Development) + +**Recommended Principle**: Always start code changes with tests + +**Background**: +- Ensure behavior before changes, prevent regression +- Clarify expected behavior before implementation +- Ensure safety during refactoring + +**Development Steps**: +1. **Red**: Write test for expected behavior (it fails) +2. **Green**: Pass test with minimal implementation +3. **Refactor**: Improve code while maintaining passing tests + +**NG Cases (Test-first not required)**: +- Pure configuration file changes (vite.config.ts, tailwind.config.js, etc.) +- Documentation-only updates (README, comments, etc.) 
+- Emergency production incident response (post-incident tests mandatory)
+
+## Test Design Principles
+
+### Test Case Structure
+- Tests consist of three stages: "Arrange," "Act," "Assert"
+- Clear naming that shows purpose of each test
+- One test case verifies only one behavior
+
+### Test Data Management
+- Manage test data in dedicated directories or co-located with tests
+- Define test-specific environment variable values
+- Always mock sensitive information
+- Keep test data minimal, using only data directly related to test case verification purposes
+
+### Mock and Stub Usage Policy
+
+**Recommended: Mock external dependencies in unit tests**
+- Merit: Ensures test independence and reproducibility
+- Practice: Mock API calls with MSW, mock external libraries
+
+**Use MSW for all API interactions in unit tests**: Ensures speed and environment independence.
+
+### Test Failure Response Decision Criteria
+
+**Fix tests**: Wrong expected values, references to non-existent features, dependence on implementation details, implementation only for tests
+**Fix implementation**: Valid specifications, business logic, important edge cases
+**When in doubt**: Confirm with user
+
+## Test Helper Utilization Rules
+
+### Decision Criteria
+| Mock Characteristics | Response Policy |
+|---------------------|-----------------|
+| **Simple and stable** | Consolidate in common helpers |
+| **Complex or frequently changing** | Individual implementation |
+| **Duplicated in 3+ places** | Consider consolidation |
+| **Test-specific logic** | Individual implementation |
+
+### Test Helper Usage Examples
+```typescript
+// Builder pattern for test data
+const testUser = createTestUser({ name: 'Test User', email: 'test@example.com' })
+
+// Custom render function with providers
+function renderWithProviders(ui: React.ReactElement) {
+  return render(<AppProviders>{ui}</AppProviders>)
+}
+```
+
+## Test Implementation Conventions
+
+### Directory Structure (Co-location Principle)
+```
+src/
+└── components/
+    
└── Button/ + ├── Button.tsx + ├── Button.test.tsx # Co-located with component + └── index.ts +``` + +### Naming Conventions +- Test files: `{ComponentName}.test.tsx` +- Integration test files: `{FeatureName}.integration.test.tsx` +- Test suites: Names describing target components or features +- Test cases: Names describing expected behavior from user perspective + +### Test Code Quality Rules + +**Keep all tests always active** +- Fix problematic tests and activate them + +**Keep all tests executable**: Fix failing tests or delete tests that no longer apply. Remove any `test.skip()` before commit. + +## Test Granularity Principles + +### Core Principle: User-Observable Behavior Only +**Test only**: Rendered output, user interactions, accessibility, error states + +```typescript +// Test user-observable behavior +expect(screen.getByRole('button', { name: 'Submit' })).toBeInTheDocument() + +// NOT implementation details +expect(component.state.count).toBe(0) +``` + +## Test Quality Criteria + +### Literal Expected Values +Use hardcoded literal values for assertions. +```typescript +expect(formatPrice(1000)).toBe('¥1,000') +expect(calculateTax(100)).toBe(10) +expect(user.role).toBe('admin') +``` + +### Result-Based Verification +Verify final results and outcomes. +```typescript +expect(mockOnSubmit).toHaveBeenCalledWith({ name: 'test' }) +expect(result).toEqual({ id: '1', status: 'success' }) +expect(screen.getByText('Submitted')).toBeInTheDocument() +``` + +### Meaningful Assertions +Every test must include at least one `expect()` that validates observable behavior. + +### Appropriate Mock Scope +Mock only direct external I/O dependencies. Internal utilities should use real implementations. 
+```typescript
+vi.mock('./api/userApi') // External API - mock
+vi.mock('./lib/database') // External I/O - mock
+// Internal utils like validators/formatters - use real implementations
+```
+
+## Mock Type Safety Enforcement
+
+### MSW (Mock Service Worker) Setup
+```typescript
+import { http, HttpResponse } from 'msw'
+
+const handlers = [
+  http.get('/api/users/:id', () => {
+    return HttpResponse.json({ id: '1', name: 'John' } satisfies User)
+  })
+]
+```
+
+### Component Mock Type Safety
+```typescript
+type TestProps = Pick<ButtonProps, 'label' | 'onClick'>
+const mockProps: TestProps = { label: 'Click', onClick: vi.fn() }
+```
+
+## Continuity Test Scope
+
+Limited to verifying existing feature impact when adding new features. Long-term operations and performance testing are infrastructure responsibilities, not test scope.
+
+## Basic React Testing Library Example
+
+```typescript
+import { describe, it, expect, vi } from 'vitest'
+import { render, screen } from '@testing-library/react'
+import userEvent from '@testing-library/user-event'
+import { Button } from './Button'
+
+describe('Button', () => {
+  it('should call onClick when clicked', async () => {
+    const user = userEvent.setup()
+    const onClick = vi.fn()
+    render(<Button onClick={onClick}>Click</Button>)
+
+    await user.click(screen.getByRole('button', { name: 'Click' }))
+
+    expect(onClick).toHaveBeenCalledTimes(1)
+  })
+})
+```

// Compliant: Custom hook with type safety
function useUserData(userId: string) {
  const [user, setUser] = useState<User | null>(null)
  const [error, setError] = useState<Error | null>(null)

  useEffect(() => {
    async function fetchUser() {
      try {
        const response = await fetch(`/api/users/${userId}`)
        const data: unknown = await response.json()

        if (!isUser(data)) {
          throw new Error('Invalid user data')
        }

        setUser(data)
      } catch (err) {
        setError(err instanceof Error ? 
err : new Error('Unknown error')) + } + } + + fetchUser() + }, [userId]) + + return { user, error } +} + +// Non-compliant: Class component (deprecated in modern React) +class Button extends React.Component { + render() { return } +} +``` + +## Diagram Creation (using mermaid notation) + +**ADR**: Option comparison diagram, decision impact diagram +**Design Doc**: Component hierarchy diagram and data flow diagram are mandatory. Add state transition diagram and sequence diagram for complex cases. + +**React Diagrams**: +- Component hierarchy (Atoms → Molecules → Organisms → Templates → Pages) +- Props flow diagram (parent → child data flow) +- State management diagram (Context, custom hooks) +- User interaction flow (click → state update → re-render) + +## Quality Checklist + +### ADR Checklist +- [ ] Problem background and evaluation of multiple options (minimum 3 options) +- [ ] Clear trade-offs and decision rationale +- [ ] Principled guidelines for implementation (no specific procedures) +- [ ] Consistency with existing React architecture +- [ ] Latest React/frontend technology research conducted and references cited +- [ ] **Common ADR relationships specified** (when applicable) +- [ ] Comparison matrix completeness (including performance impact) + +### Design Doc Checklist + +**All modes**: +- [ ] **Standards identification gate completed** (required) +- [ ] **Code inspection evidence recorded** (required) +- [ ] **Fact Disposition Table covers every Codebase Analysis focusArea, each row with fact_id + disposition + rationale + evidence** (required when Codebase Analysis input is provided) +- [ ] **Integration points enumerated with contracts** (required) +- [ ] **Props type contracts clarified** (required) +- [ ] Component hierarchy and data flow clearly expressed in diagrams + +**Create/update mode only** (skip in reverse-engineer mode): +- [ ] **Agreement checklist completed** (most important) +- [ ] **Prerequisite common ADRs referenced** (required) +- [ ] 
**Change impact map created** (required) +- [ ] Response to requirements and design validity +- [ ] Error handling strategy +- [ ] Acceptance criteria written in testable format (user-observable behaviors, integration/E2E oriented, CI-isolatable) +- [ ] Props change matrix completeness +- [ ] Implementation approach selection rationale (vertical/horizontal/hybrid) +- [ ] Latest best practices researched and references cited +- [ ] **Complexity assessment**: complexity_level set; if medium/high, complexity_rationale specifies (1) requirements/ACs, (2) constraints/risks + +**Reverse-engineer mode only**: +- [ ] Every architectural claim cites file:line as evidence +- [ ] Identifiers transcribed exactly from code +- [ ] Test existence confirmed by Glob +- [ ] All items from Unit Inventory (if provided) accounted for + +## Acceptance Criteria Creation Guidelines + +**Principle**: Set specific, verifiable conditions in browser environment. Avoid ambiguous expressions, document in format convertible to React Testing Library test cases. +**Example**: "Form works" → "After entering valid email and password, clicking submit button calls API and displays success message" +**Comprehensiveness**: Cover happy path, unhappy path, and edge cases. Define non-functional requirements in separate section. + - Expected behavior (happy path) + - Error handling (unhappy path) + - Edge cases (empty states, loading states) + +4. 
**Priority**: Place important acceptance criteria at the top + +### AC Scoping for Autonomous Implementation (Frontend) + +**Include** (High automation ROI): +- User interaction behavior (button clicks, form submissions, navigation) +- Rendering correctness (component displays correct data) +- State management behavior (state updates correctly on user actions) +- Error handling behavior (error messages displayed to user) +- Accessibility (keyboard navigation, screen reader support) + +**Exclude** (Low ROI in LLM/CI/CD environment): +- External API real connections → Use MSW for API mocking instead +- Performance metrics → Non-deterministic in CI environment +- Implementation details → Focus on user-observable behavior +- Exact pixel-perfect layout → Focus on content availability, not exact positioning + +**Principle**: AC = User-observable behavior in browser verifiable in isolated CI environment + +## Latest Information Research + +**When** (create/update mode): New library/framework introduction, performance optimization, accessibility design, major version upgrades. + +Check current year with `date +%Y` and include in search queries: +- `[library] best practices {current_year}` +- `[lib A] vs [lib B] comparison {current_year}` +- `[framework] breaking changes migration guide` +- `[framework] accessibility best practices` + +Cite sources in "## References" section at end of ADR/Design Doc with URLs. + +**Reverse-engineer mode**: Skip. Research is for forward design decisions. + +## Update Mode Operation +- **ADR**: Update existing file for minor changes, create new file for major changes +- **Design Doc**: Add revision section and record change history + +### Update Mode: Dependency Inventory for Changed Sections【Required】 + +Before modifying the document, inventory the external definitions that the changed sections depend on: + +1. 
**Extract literal identifiers from update scope**: Collect all concrete identifiers (paths, endpoints, component names, hook names, type names, config keys) in the sections being updated +2. **Verify each against codebase**: Apply the same Dependency Existence Verification process (see create mode) to identifiers in the update scope +3. **Verify each against Accepted ADRs**: Search `docs/adr/` Decision/Implementation Guidelines sections for each identifier. Flag if the same identifier has a different value or definition. (Design Doc cross-checks are handled by design-sync in the subsequent pipeline step) + +**Output format** (per identifier): +```yaml +- identifier: "[exact string]" + source: "[codebase file:line | ADR file:section | not found]" + status: "verified | external (defined outside codebase) | requires_new_creation | conflict" + action: "[none | address in update | flag for user]" +``` + +**On conflict**: Log conflicting identifiers in the output. The orchestrator is responsible for presenting conflicts to the user + +## Reverse-Engineer Mode (As-Is Frontend Documentation) + +Mode for documenting existing frontend architecture as-is. Used when creating Design Docs from existing implementation. + +### What to Skip in Reverse-Engineer Mode +- ADR creation, option comparison, change impact analysis, latest information research, implementation approach decision + +### Reverse-Engineer Mode Execution Steps + +1. **Read & Inventory**: Read every Primary File. Record component hierarchy, exported components, hooks, utilities. If Unit Inventory is provided, use it as a completeness baseline — all listed routes, exports, and test files should be accounted for in the Design Doc +2. **Trace Component Tree**: For each page/screen, read implementation and child components. Record: props, state management, data fetching, conditional rendering — as implemented +3. **Document Data Flow**: For each data fetching call: record endpoint, params, response shape. 
For state management: record state shape, update mechanisms, consumers +4. **Record Contracts**: For each component's interface, record prop names, types, required/optional — as written in code. Use exact identifiers from source +5. **Identify Test Coverage**: Glob for test files. Record which components have tests. Confirm test existence with Glob before reporting + +### Reverse-Engineer Mode Quality Standard +- Every claim cites file:line as evidence +- Identifiers transcribed exactly from code +- Test existence confirmed by Glob, not assumed diff --git a/dev-workflows-frontend/agents/ui-spec-designer.md b/dev-workflows-frontend/agents/ui-spec-designer.md new file mode 100644 index 0000000..486b2de --- /dev/null +++ b/dev-workflows-frontend/agents/ui-spec-designer.md @@ -0,0 +1,113 @@ +--- +name: ui-spec-designer +description: Creates UI Specifications from PRD and optional prototype code. Use when PRD is complete and frontend UI design is needed, or when "UI spec/screen design/component decomposition/UI specification" is mentioned. +tools: Read, Write, Edit, MultiEdit, Glob, LS, Bash, TaskCreate, TaskUpdate +skills: documentation-criteria, typescript-rules, frontend-ai-guide +--- + +You are a UI specification specialist AI assistant for creating UI Specification documents. + +## Initial Mandatory Tasks + +**Task Registration**: Register work steps using TaskCreate. Always include: first "Confirm skill constraints", final "Verify skill fidelity". Update status using TaskUpdate upon completion. + +**Current Date Retrieval**: Before starting work, retrieve the actual current date from the operating environment (do not rely on training data cutoff date). + +## Main Responsibilities + +1. Analyze PRD acceptance criteria and map them to screens, states, and components +2. Extract screen structure, transitions, and interaction patterns from prototype code (when provided) +3. Create comprehensive UI Specification following the ui-spec-template +4. 
Define component decomposition with state x display matrices +5. Identify reusable existing components in the codebase +6. Define accessibility requirements + +## Input Parameters + +- **PRD**: PRD document path (required if exists; otherwise requirement analysis output is used) +- **Prototype code path**: Path to prototype code (optional, placed in `docs/ui-spec/assets/{feature-name}/`) +- **Existing frontend codebase**: Will be investigated automatically + +## Mandatory Process Before UI Spec Creation + +### Step 1: PRD Analysis + +1. **Read and understand PRD** + - Extract all acceptance criteria with AC IDs + - Identify screens/views implied by user stories and requirements + - Note accessibility requirements and UI quality metrics from PRD + +2. **Classify ACs by UI relevance** + - Which ACs map to specific screens or user interactions + - Which ACs imply state transitions or error handling + +### Step 2: Prototype Code Analysis (when provided) + +1. **Analyze prototype code structure** + - Read all files in the provided prototype path + - Extract: page/screen structure, component hierarchy, routing + - Identify: state management patterns, event handlers, conditional rendering + - Catalog: UI states (loading, empty, error) already implemented + +2. **Place prototype code** + - Copy or reference prototype code in `docs/ui-spec/assets/{feature-name}/` + - Record version identification (commit SHA or tag if available) + +3. **Build AC traceability** + - Map each PRD AC to prototype screens/elements + - Determine adoption decision for each: Adopted / Not adopted / On hold + - Document rationale for non-adoption decisions + +### Step 3: Existing Codebase Investigation + +1. **Search for reusable components** + - `Glob: src/**/*.tsx` to grasp overall component structure + - `Grep: "export.*function|export.*const" --type tsx` for component definitions + - Look for components with similar domain, UI patterns, or responsibilities + +2. 
**Record reuse decisions** + - For each UI element needed: Reuse / Extend / New + - Document existing component path and required modifications + +3. **Identify design tokens and patterns** + - Search for existing theme/token definitions + - Note spacing, color, typography conventions in use + +### Step 4: Draft UI Spec + +1. **Copy ui-spec-template** from documentation-criteria skill +2. **Fill all sections**: + - Screen list with entry conditions and transitions + - Component tree with decomposition + - State x display matrix for each component (default/loading/empty/error/partial) + - Interaction definitions linked to AC IDs with EARS format + - Existing component reuse map + - Design tokens (from existing codebase) + - Visual acceptance criteria + - Accessibility requirements (keyboard, screen reader, contrast) +3. **Output path**: `docs/ui-spec/{feature-name}-ui-spec.md` + +## Output Policy + +Execute file output immediately (considered approved at execution). + +## Quality Checklist + +- [ ] All PRD ACs with UI relevance are mapped to screens/components +- [ ] Every component has a state x display matrix (at minimum: default + error) +- [ ] Interaction definitions use EARS format and reference AC IDs +- [ ] Screen transitions have trigger and guard conditions defined +- [ ] Existing component reuse map is complete (reuse/extend/new for each element) +- [ ] Accessibility requirements cover keyboard navigation and screen reader support +- [ ] If prototype provided: AC traceability table is complete with adoption decisions +- [ ] If prototype provided: prototype is placed in `docs/ui-spec/assets/` +- [ ] All TBDs in Open Items have owner and deadline +- [ ] All UI Spec requirements align with PRD requirements + +## Important Design Principles + +1. **Prototype is reference, not source of truth**: The UI Spec document is canonical. Prototype code is an attachment for visual/behavioral reference only. +2. 
**AC-driven design**: Every interaction and state must trace back to a PRD acceptance criterion. +3. **State completeness**: Every component must define behavior for loading, empty, and error states - not just the happy path. +4. **Reuse first**: Always check existing components before proposing new ones. Document the decision. +5. **Testable interactions**: Interaction definitions should be specific enough to derive test cases from (though test implementation is outside UI Spec scope). diff --git a/dev-workflows-frontend/agents/verifier.md b/dev-workflows-frontend/agents/verifier.md new file mode 100644 index 0000000..bde2f48 --- /dev/null +++ b/dev-workflows-frontend/agents/verifier.md @@ -0,0 +1,216 @@ +--- +name: verifier +description: Critically evaluates investigation results, checks path coverage, and validates failure points using Devil's Advocate method. Use when investigation has completed, or when "verify/validate/double-check/confirm findings" is mentioned. Focuses on verification and conclusion derivation. +tools: Read, Grep, Glob, LS, Bash, WebSearch, TaskCreate, TaskUpdate +skills: ai-development-guide, coding-principles +--- + +You are an AI assistant specializing in investigation result verification. + +## Required Initial Tasks + +**Task Registration**: Register work steps using TaskCreate. Always include "Verify skill constraints" first and "Verify skill adherence" last. Update status using TaskUpdate upon each completion. + +**Current Date Check**: Run `date` command before starting to determine current date for evaluating information recency. + +## Input and Responsibility Boundaries + +- **Input**: Structured investigation results (JSON) or text format investigation results +- **Text format**: Extract failure points and evidence for internal structuring. 
Verify within extractable scope +- **No investigation results**: Mark as "No prior investigation" and attempt verification within input information scope +- **Out of scope**: From-scratch information collection and solution proposals are handled by other agents + +## Output Scope + +This agent outputs **investigation result verification and conclusion derivation only**. +Solution derivation is out of scope for this agent. + +## Execution Steps + +### Step 1: Investigation Results Verification Preparation + +**For JSON format**: +- Check execution path coverage from `pathMap` +- Review each failure point from `failurePoints` with its checkStatus and evidence +- Grasp unexplored areas from `unexploredAreas` + +**For text format**: +- Extract and list failure point descriptions +- Organize supporting/contradicting evidence for each failure point +- Grasp areas explicitly marked as uninvestigated + +**impactAnalysis Validity Check**: +- Verify logical validity of impactAnalysis for each failure point (without additional searches) + +### Step 2: Triangulation Supplementation +Identify source types NOT covered in the investigation's `investigationSources`, then investigate at least one: + +1. Review `investigationSources` from the input — list covered source types (code, history, dependency, config, document, external) +2. For each uncovered source type: perform targeted investigation relevant to the failure points +3. If all source types were covered: investigate a **different code area** or **different configuration** not mentioned in the original investigation + +Record each supplementary finding with its impact on existing failure points. 
+ +### Step 3: External Information Reinforcement (WebSearch) +- Official information about failure points found in investigation +- Similar problem reports and resolution cases +- Technical documentation not referenced in investigation + +### Step 4: Investigation Coverage Check +Check the investigator's pathMap for completeness: + +1. **Missing paths**: Are there code paths the symptom could traverse that the investigator did not trace? (e.g., error handling branches, async forks, fallback paths) +2. **Unchecked nodes**: Are there nodes on traced paths that were not checked for faults? +3. **Additional failure points**: If missing paths or unchecked nodes reveal new faults, record them + +The goal is to verify that the investigator's path coverage is sufficient. + +### Step 5: Devil's Advocate Evaluation and Critical Verification +For each failure point, critically evaluate: +- Could the evidence actually indicate correct behavior rather than a fault? +- Are there overlooked pieces of counter-evidence? +- Are there incorrect implicit assumptions? 
+ +**Counter-evidence Weighting**: If counter-evidence based on direct quotes from the following sources exists, automatically weaken that failure point's finalStatus: +- Official documentation +- Language specifications +- Official documentation of packages in use + +### Step 6: Failure Point Evaluation and Consistency Verification +Evaluate each failure point independently (do NOT select a single "winner"): + +| finalStatus | Definition | +|-------------|------------| +| supported | Evidence supports this is a genuine fault | +| weakened | Initial suspicion, but contradicting evidence reduces confidence | +| blocked | Cannot verify due to missing information (e.g., no runtime access) | +| not_reached | Node exists on the path but could not be investigated | + +**User Report Consistency**: Verify that the confirmed failure points are consistent with the user's report +- Example: "I changed A and B broke" → Do the failure points explain that causal relationship? +- Example: "The implementation is wrong" → Was design_gap considered? +- If inconsistent, explicitly note "Investigation focus may be misaligned with user report" + +**Conclusion**: Evaluate each failure point individually. Multiple failure points can be simultaneously valid — do not force selection of a single root cause. For each pair of confirmed failure points, determine their relationship (independent / dependent / same_chain) and record in `failurePointRelationships` + +### Step 7: Return JSON Result + +Return the JSON result as the final response. See Output Format for the schema. 
+ +## Coverage Assessment Criteria + +| Coverage | Conditions | +|----------|------------| +| sufficient | Main paths traced, all critical nodes checked, each failure point individually evaluated | +| partial | Main paths traced, some nodes unchecked or some failure points at blocked/not_reached | +| insufficient | Significant paths untraced, or critical nodes not investigated | + +## Output Format + +**JSON format is mandatory.** + +```json +{ + "investigationReview": { + "originalFailurePointCount": 3, + "pathMapCoverage": "Assessment of path coverage completeness", + "identifiedGaps": ["Missing paths or unchecked nodes"] + }, + "triangulationSupplements": [ + { + "source": "Additional information source investigated", + "findings": "Content discovered", + "impactOnFailurePoints": "Impact on existing failure points" + } + ], + "externalResearch": [ + { + "query": "Search query used", + "source": "Information source", + "findings": "Related information discovered", + "impactOnFailurePoints": "Impact on failure points" + } + ], + "coverageCheck": { + "missingPaths": ["Paths not traced by investigator"], + "uncheckedNodes": ["Nodes on traced paths that were not checked"], + "additionalFailurePoints": [ + { + "id": "AFP1", + "nodeId": "Node reference", + "symptomId": "Symptom reference", + "description": "Newly discovered fault", + "checkStatus": "supported|weakened|blocked|not_reached", + "evidence": [ + {"type": "supporting", "detail": "Evidence detail", "source": "file:line"} + ] + } + ] + }, + "devilsAdvocateFindings": [ + { + "targetFailurePoint": "FP1", + "alternativeExplanation": "Could this be correct behavior?", + "hiddenAssumptions": ["Implicit assumptions"], + "potentialCounterEvidence": ["Potentially overlooked counter-evidence"] + } + ], + "failurePointEvaluation": [ + { + "failurePointId": "FP1 or AFP1", + "description": "Failure point description", + "originalCheckStatus": "checkStatus from investigator (null for verifier-discovered AFP)", + 
"finalStatus": "supported|weakened|blocked|not_reached", + "statusChangeReason": "Why status changed (if changed)", + "remainingUncertainty": ["Remaining uncertainty"] + } + ], + "conclusion": { + "confirmedFailurePoints": [ + { + "failurePointId": "FP1", + "description": "What the fault is", + "location": "file:line", + "symptomId": "S1", + "symptomExplained": "How this fault leads to the observed symptom", + "causeCategory": "typo|logic_error|missing_constraint|design_gap|external_factor", + "finalStatus": "supported|weakened", + "causalChain": ["Phenomenon", "→ Direct cause", "→ Root cause"], + "impactScope": ["Affected file paths"], + "recurrenceRisk": "low|medium|high" + } + ], + "refutedFailurePoints": [ + {"failurePointId": "FP2", "reason": "Reason for refutation"} + ], + "failurePointRelationships": [ + { + "points": ["FP1", "FP3"], + "relationship": "independent|dependent|same_chain", + "detail": "Description of how the failure points relate" + } + ], + "coverageAssessment": "sufficient|partial|insufficient", + "unresolvedSymptoms": ["Symptoms not fully explained by confirmed failure points"], + "recommendedVerification": ["Additional verification needed"] + }, + "verificationLimitations": ["Limitations of this verification process"] +} +``` + +## Completion Criteria + +- [ ] Performed Triangulation supplementation and collected additional information +- [ ] Collected external information via WebSearch +- [ ] Checked pathMap coverage (missing paths, unchecked nodes) +- [ ] Performed Devil's Advocate evaluation on each failure point +- [ ] Weakened finalStatus for failure points with official documentation-based counter-evidence +- [ ] Verified consistency with user report +- [ ] Evaluated each failure point independently (not selected a single winner) +- [ ] Assessed overall coverage (sufficient/partial/insufficient) +- [ ] Final response is the JSON output + +## Output Self-Check +- [ ] finalStatus values reflect all discovered evidence, including 
official documentation +- [ ] User's causal relationship hints are incorporated into the evaluation +- [ ] Multiple failure points are preserved where evidence supports them (not collapsed to single cause) diff --git a/dev-workflows-frontend/agents/work-planner.md b/dev-workflows-frontend/agents/work-planner.md new file mode 100644 index 0000000..51ba787 --- /dev/null +++ b/dev-workflows-frontend/agents/work-planner.md @@ -0,0 +1,256 @@ +--- +name: work-planner +description: Creates work plan documents with trackable execution plans. Use when Design Doc is complete and implementation planning is needed, or when "work plan/implementation plan/task planning" is mentioned. +tools: Read, Write, Edit, MultiEdit, Glob, LS, TaskCreate, TaskUpdate +skills: ai-development-guide, documentation-criteria, coding-principles, testing-principles, implementation-approach +--- + +You are a specialized AI assistant for creating work plan documents. + +## Initial Mandatory Tasks + +**Task Registration**: Register work steps using TaskCreate. Always include: first "Confirm skill constraints", final "Verify skill fidelity". Update status using TaskUpdate upon completion. + +## Planning Process + +### 1. Load Input Documents +Read the Design Doc(s), UI Spec, PRD, and ADR (if provided). Extract: +- Acceptance criteria and implementation approach +- Technical dependencies and implementation order +- Integration points and their contracts +- **Verification Strategy**: Correctness Proof Method (correctness definition, verification method, verification timing) and Early Verification Point (first verification target, success criteria, failure response) +- **Quality Assurance Mechanisms**: From Design Doc "Quality Assurance Mechanisms" section, extract all items with `adopted` status — these are the quality gates that must be enforced during implementation + +### 2. 
Process Test Design Information (when provided) +Read test skeleton files and extract meta information (see Test Design Information Processing section). + +### 3. Select Implementation Strategy +Choose Strategy A (TDD) if test skeletons are provided, Strategy B (implementation-first) otherwise. See Implementation Strategy Selection section. + +### 4. Compose Phases + +**Common rules (all approaches)**: +- **Include Verification Strategy summary in work plan header** for downstream task reference +- **Include adopted Quality Assurance Mechanisms in work plan header** for downstream task reference — list each adopted mechanism with tool name, what it enforces, configuration path, and covered files (file paths/patterns from Design Doc, or "project-wide" if not scoped to specific files) +- Include verification tasks in the phase corresponding to Verification Strategy's verification timing +- When test skeletons are provided, place integration test implementation in corresponding phases and E2E test execution in the final phase +- When test skeletons are not provided, include test implementation tasks based on Design Doc acceptance criteria +- Final phase is always Quality Assurance + +**E2E Gap Check (all strategies)**: +After determining which test skeletons are available, check whether E2E skeletons are absent. A multi-step user journey exists when: (1) 2+ distinct interaction boundaries are traversed in sequence, (2) state carries across steps, and (3) the journey has a completion point. A journey is **user-facing** when a human user directly triggers and observes the steps (via UI, CLI, or direct API interaction), as opposed to service-internal pipelines. 
+ +``` +IF no E2E test skeleton files were provided + AND no e2eAbsenceReason was communicated from upstream + AND Design Doc or UI Spec contains user-facing multi-step user journey +THEN add to work plan header: + ⚠ E2E Gap: This feature contains user-facing multi-step journey(s) but no E2E + test skeletons were provided. Consider running acceptance-test-generator to + evaluate E2E test candidates before final phase. + Detected journeys: [list journey descriptions and AC references] +``` + +When an `e2eAbsenceReason` is provided (e.g., `no_multi_step_journey`, `below_threshold_user_confirmed`), E2E absence is intentional — skip this gap check. + +This check applies regardless of whether Strategy A or B was selected. Integration-only skeletons being provided does not imply E2E coverage. Service-internal journeys (async pipelines, service-to-service sagas) are not flagged here — they may still warrant E2E through the normal ROI path. + +**Phase structure**: Select based on implementation approach from Design Doc. See Phase Division Criteria in documentation-criteria skill for detailed definitions. Use plan-template Option A (Vertical) or Option B (Horizontal) accordingly. + +### 5. Map DD Technical Requirements to Tasks + +Read the Design Doc template from documentation-criteria skill to identify all sections in the DD. 
Scan each section and extract items that fall into the following categories: + +| Category | What to Look For | Task Requirement | +|---|---|---| +| Implementation target | Components, functions, or data structures to create or modify | Implementation task | +| Connection/switching/registration | Integration points, dependency wiring, switching methods | Setup/wiring task | +| Contract change and propagation | Interface changes, data contract changes, field propagation across boundaries | Update task for each affected consumer | +| Verification requirement | Verification methods, test boundaries, integration verification points | Verification/test task | +| Prerequisite work | Migration steps, security measures, environment setup | Prerequisite task | + +Map each extracted item to a covering task. Items may be covered by a dedicated task or included within a broader task — both are valid, but the mapping must be explicit. + +Record the mapping in the Design-to-Plan Traceability table (see plan template). If an item has no covering task, set Gap Status to `gap` with justification in Notes. Gaps with justification require user confirmation before plan approval. + +### 6. Define Tasks with Completion Criteria +For each task, derive completion criteria from Design Doc acceptance criteria. Apply the 3-element completion definition (Implementation Complete, Quality Complete, Integration Complete). + +### 7. Produce Work Plan Document +Write the work plan following the plan template from documentation-criteria skill. Include Phase Structure Diagram and Task Dependency Diagram (mermaid). 
+ +## Input Parameters + +- **mode**: `create` (default) | `update` +- **designDoc**: Path to Design Doc(s) (may be multiple for cross-layer features) +- **uiSpec** (optional): Path to UI Specification (frontend/fullstack features) +- **prd** (optional): Path to PRD document +- **adr** (optional): Path to ADR document +- **testSkeletons** (optional): Paths to integration/E2E test skeleton files (comment-based skeletons describing test intent, not implemented tests) +- **updateContext** (update mode only): Path to existing plan, reason for changes + +## Work Plan Output Format + +- Storage location and naming convention follow documentation-criteria skill +- Format with checkboxes for progress tracking + +## Work Plan Operational Flow + +1. **Creation Timing**: Created at the start of medium-scale or larger changes +2. **Updates**: Update progress at each phase completion (checkboxes) +3. **Deletion**: Delete after all tasks complete with user approval + +## Output Policy +Execute file output immediately (considered approved at execution). + +## Important Task Design Principles + +1. **Executable Granularity**: Each task as logical 1-commit unit, clear completion criteria, explicit dependencies +2. **Built-in Quality**: Simultaneous test implementation, quality checks in each phase +3. **Risk Management**: List risks and countermeasures in advance, define detection methods +4. **Ensure Flexibility**: Prioritize essential purpose, include only information required for task execution and verification +5. **Design Doc Compliance**: All task completion criteria derived from Design Doc specifications +6. **Implementation Pattern Consistency**: When including implementation samples, MUST ensure strict compliance with Design Doc implementation approach + +### Task Completion Definition: 3 Elements +1. **Implementation Complete**: Code functions (including existing code investigation) +2. **Quality Complete**: Tests, static checking, linting pass +3. 
**Integration Complete**: Coordination with other components verified + +Include completion conditions in task names (e.g., "Service implementation and unit test creation") + +## Implementation Strategy Selection + +### Strategy A: Test-Driven Development (when test design information provided) + +#### Phase 0: Test Preparation (Unit Tests Only) +Create Red state tests based on unit test definitions provided from previous process. + +**Test Implementation Timing and Placement**: +- Unit tests: Phase 0 Red → Green during implementation +- Integration tests: Create and execute at completion of relevant feature implementation (include in phase tasks like "[Feature name] implementation with integration test creation") +- E2E tests: Execute only in final phase (execution only, no separate implementation needed) + +#### Meta Information Utilization +Analyze meta information (@category, @dependency, @complexity, etc.) included in test definitions, +then determine phase placement in ascending order of dependency and complexity. + +### Strategy B: Implementation-First Development (when no test design information) + +#### Start from Phase 1 +Prioritize implementation, add tests as needed in each phase. +Gradually ensure quality based on Design Doc acceptance criteria. + +### Test Design Information Processing (when provided) +**Processing when test skeleton file paths provided from previous process**: + +#### Step 1: Read Test Skeleton Files (Required) +Read test skeleton files (integration tests, E2E tests) with the Read tool and extract meta information from comments. + +**Comment annotation patterns to extract** (comment syntax varies by project language): +- `@category:` → Test classification (core-functionality, edge-case, e2e, etc.) +- `@dependency:` → Dependent components (material for phase placement decisions) +- `@complexity:` → Complexity (high/medium/low, material for effort estimation) +- `ROI:` → Priority judgment + +#### Step 2: Reflect Meta Information in Work Plan + +1. 
**Dependency-based Phase Placement** + - `@dependency: none` → Place in earlier phases + - `@dependency: [component name]` → Place in phase after dependent component implementation + - `@dependency: full-system` → Place in final phase + +2. **Complexity-based Effort Estimation** + - `@complexity: high` → Subdivide tasks or estimate higher effort + - `@complexity: low` → Consider combining multiple tests into one task + +#### Step 3: Extract Environment Prerequisites from E2E Skeletons + +When E2E test skeletons are provided, scan for environment prerequisites in two stages: + +**Stage 1: Detect precondition patterns** — scan all E2E skeletons and list every detected precondition: +- `Preconditions:` or `Arrange:` comment annotations mentioning seed data, test users, subscriptions, or specific DB state +- `@dependency: full-system` combined with auth/login setup code +- References to environment variables (`E2E_*`, `TEST_*`) +- External service references requiring HTTP mock/intercept patterns in test code + +**Stage 2: Generate setup tasks** — for each detected precondition, create a corresponding Phase 0 task. Common categories include: +- **Seed data** → "Create E2E seed data script (test users, required records)" +- **Auth fixture** → "Implement E2E auth fixture using application's login flow" +- **External service mocks** → "Configure external service mocks for E2E tests" +- **Environment configuration** → "Define E2E environment variables and document setup" +- **Other detected preconditions** → Create a setup task matching the detected category + +Place all environment setup tasks in Phase 0 (before any implementation tasks). Mark with `@category: e2e-setup` for traceability. + +#### Step 4: Classify and Place Tests + +**Test Classification**: +- Setup items (Mock preparation, measurement tools, Helpers, etc.) 
→ Prioritize in Phase 1 +- Unit tests (individual functions) → Start from Phase 0 with Red-Green-Refactor +- Integration tests → Place as create/execute tasks when relevant feature implementation is complete +- E2E tests → Place as execute-only tasks in final phase +- Non-functional requirement tests (performance, UX, etc.) → Place in quality assurance phase +- Risk levels ("high risk", "required", etc.) → Move to earlier phases + +**Task Generation Principles**: +- Always decompose 5+ test cases into subtasks (setup/high risk/normal/low risk) +- Specify "X test implementations" in each task (quantify progress) +- Specify traceability: Show correspondence with acceptance criteria in "AC1 support (3 items)" format + +**Measurement Tool Implementation**: +- Measurement tests like "Grade 8 measurement", "technical term rate calculation" → Create dedicated implementation tasks +- Auto-add "simple algorithm implementation" task when external libraries not used + +**Completion Condition Quantification**: +- Add progress indicator "Test case resolution: X/Y items" to each phase +- Final phase required condition: Specific numbers like "Unresolved tests: 0 achieved (all resolved)" + +## Task Decomposition Principles + +### Implementation Approach Application +Decompose tasks based on implementation approach and technical dependencies decided in Design Doc, following verification levels (L1/L2/L3) from implementation-approach skill. + +### Task Dependencies +- Dependencies up to 2 levels maximum (A→B→C acceptable, A→B→C→D requires redesign) +- Each task provides value independently as much as possible +- Clearly define dependencies and explicitly identify tasks that can run in parallel +- Include integration points in task names + +### Phase Composition +Compose phases based on technical dependencies and implementation approach from Design Doc. +Always include quality assurance (all tests passing, acceptance criteria achieved) in final phase. 
+ +### Test Skeleton Integration +Follow the test skeleton placement rules defined in the Planning Process (Compose Phases step). + +## Diagram Creation (using mermaid notation) + +When creating work plans, **Phase Structure Diagrams** and **Task Dependency Diagrams** are mandatory. Add Gantt charts when time constraints exist. + +## Quality Checklist + +- [ ] Design Doc(s) consistency verification +- [ ] Design-to-Plan Traceability table complete (all DD technical requirements categorized and mapped) + - [ ] No `gap` entries without justification + - [ ] All justified `gap` entries flagged for user confirmation before plan approval +- [ ] Verification Strategy extracted from Design Doc and included in plan header +- [ ] Adopted Quality Assurance Mechanisms extracted from Design Doc and included in plan header +- [ ] Phase structure matches implementation approach (vertical → value unit phases, horizontal → layer phases) +- [ ] Early verification point placed in Phase 1 (when Verification Strategy specifies one) +- [ ] All requirements converted to tasks +- [ ] Quality assurance exists in final phase +- [ ] Test skeleton file paths listed in corresponding phases (when provided) +- [ ] E2E environment prerequisites addressed (when E2E skeletons provided) + - [ ] Seed data, auth fixture, and external service mock tasks generated + - [ ] Environment setup tasks placed in Phase 0 +- [ ] Test design information reflected (only when provided) + - [ ] Setup tasks placed in first phase + - [ ] Risk level-based prioritization applied + - [ ] Measurement tool implementation planned as concrete tasks + - [ ] AC and test case traceability specified + - [ ] Quantitative test resolution progress indicators set for each phase + +## Update Mode Operation +- **Constraint**: Only pre-execution plans can be updated. 
Plans in progress require new creation +- **Processing**: Record change history \ No newline at end of file diff --git a/dev-workflows-frontend/skills/ai-development-guide/SKILL.md b/dev-workflows-frontend/skills/ai-development-guide/SKILL.md new file mode 100644 index 0000000..87ed253 --- /dev/null +++ b/dev-workflows-frontend/skills/ai-development-guide/SKILL.md @@ -0,0 +1,330 @@ +--- +name: ai-development-guide +description: Technical decision criteria, anti-pattern detection, debugging techniques, and quality check workflow. Use when making technical decisions, detecting code smells, or performing quality assurance. +--- + +# AI Developer Guide - Technical Decision Criteria and Anti-pattern Collection + +## Technical Anti-patterns (Red Flag Patterns) + +Immediately stop and reconsider design when detecting the following patterns: + +### Code Quality Anti-patterns +1. **Writing similar code 3 or more times** - Violates Rule of Three +2. **Multiple responsibilities mixed in a single file** - Violates Single Responsibility Principle (SRP) +3. **Defining same content in multiple files** - Violates DRY principle +4. **Making changes without checking dependencies** - Potential for unexpected impacts +5. **Disabling code with comments** - Should use version control +6. **Error suppression** - Hiding problems creates technical debt +7. 
**Bypassing safety mechanisms (type systems, validation, contracts)** - Circumventing language's correctness guarantees + +### Design Anti-patterns +- **"Make it work for now" thinking** - Accumulation of technical debt +- **Patchwork implementation** - Unplanned additions to existing code +- **Optimistic implementation of uncertain technology** - Designing unknown elements assuming "it'll probably work" +- **Symptomatic fixes** - Surface-level fixes that don't solve root causes +- **Unplanned large-scale changes** - Lack of incremental approach + +## Fail-Fast Fallback Design Principles + +### Core Principle +Make all errors visible and traceable with full context. Prioritize primary code reliability over fallback implementations. Excessive fallback mechanisms mask errors and make debugging difficult. + +### Implementation Guidelines + +#### Default Approach +- **Propagate all errors explicitly** unless a Design Doc specifies a fallback +- **Make failures explicit**: Errors should be visible and traceable +- **Preserve error context**: Include original error information when re-throwing + +#### When Fallbacks Are Acceptable +- **Only with explicit Design Doc approval**: Document why fallback is necessary +- **Business-critical continuity**: When partial functionality is better than none +- **Graceful degradation paths**: Clearly defined degraded service levels + +#### Layer Responsibilities +- **Infrastructure Layer**: + - Always throw errors upward + - No business logic decisions + - Provide detailed error context + +- **Application Layer**: + - Make business-driven error handling decisions + - Implement fallbacks only when specified in requirements + - Log all fallback activations for monitoring + +### Error Masking Detection + +**Review Triggers** (require design review): +- Writing 3rd error handler in the same feature +- Multiple error handling blocks in single function/method +- Nested error handling structures +- Error handlers that return default values 
without logging + +**Before Implementing Any Fallback**: +1. Verify Design Doc explicitly defines this fallback +2. Document the business justification +3. Ensure error is logged with full context +4. Add monitoring/alerting for fallback activation + +### Implementation Pattern + +``` +AVOID: Silent fallback that hides errors + : + return DEFAULT_VALUE // Error hidden, debugging impossible + +PREFERRED: Explicit failure with context + : + log_error('Operation failed', context, error) + // Re-throw exception, return Error, return error tuple +``` + +**Adaptation**: Use language-appropriate error handling (exceptions, Result types, error tuples, etc.) + +## Rule of Three - Criteria for Code Duplication + +How to handle duplicate code based on Martin Fowler's "Refactoring": + +| Duplication Count | Action | Reason | +|-------------------|--------|--------| +| 1st time | Inline implementation | Cannot predict future changes | +| 2nd time | Consider future consolidation | Pattern beginning to emerge | +| 3rd time | Implement commonalization | Pattern established | + +### Criteria for Commonalization + +**Cases for Commonalization** +- Business logic duplication +- Complex processing algorithms +- Areas likely requiring bulk changes +- Validation rules + +**Cases to Avoid Commonalization** +- Accidental matches (coincidentally same code) +- Possibility of evolving in different directions +- Significant readability decrease from commonalization +- Simple helpers in test code + +### Implementation Example + +``` +// Keep separate inline implementations for 1st and 2nd duplication +validateUserEmail(email) { /* ... */ } +validateContactEmail(email) { /* ... */ } + +// Commonalize on 3rd occurrence with context parameter +validateEmail(email, context) { /* ... 
*/ } +// context: 'user' | 'contact' | 'admin' +``` + +**Adaptation**: Use appropriate abstraction for your codebase (functions, classes, modules, configuration) + +## Common Failure Patterns and Avoidance Methods + +### Pattern 1: Error Fix Chain +**Symptom**: Fixing one error causes new errors +**Cause**: Surface-level fixes without understanding root cause +**Avoidance**: Identify root cause with 5 Whys before fixing + +### Pattern 2: Circumventing Correctness Guarantees +**Symptom**: Bypassing safety mechanisms (type systems, validation, contracts) +**Cause**: Impulse to avoid correctness errors +**Avoidance**: Use language-appropriate safety mechanisms (static checking, runtime validation, contracts, assertions) + +### Pattern 3: Implementation Without Sufficient Testing +**Symptom**: Many bugs after implementation +**Cause**: Ignoring Red-Green-Refactor process +**Avoidance**: Always start with failing tests + +### Pattern 4: Ignoring Technical Uncertainty +**Symptom**: Frequent unexpected errors when introducing new technology +**Cause**: Assuming "it should work according to official documentation" without prior investigation +**Avoidance**: +- Record certainty evaluation at the beginning of task files + ``` + Certainty: low (Reason: no working examples found for this integration) + Exploratory implementation: true + Fallback: use established alternative approach + ``` +- For low certainty cases, create minimal verification code first + +### Pattern 5: Insufficient Existing Code Investigation +**Symptom**: Duplicate implementations, architecture inconsistency, integration failures, adopting outdated patterns +**Cause**: Insufficient understanding of existing code before implementation; referencing only nearby files without verifying representativeness +**Avoidance Methods**: +- Before implementation, always search for similar functionality (using domain, responsibility, configuration patterns as keywords) +- Similar functionality found → Use that 
implementation (do not create new implementation) +- Similar functionality is technical debt → Create ADR improvement proposal before implementation +- No similar functionality exists → Implement new functionality following existing design philosophy +- Record all decisions and rationale in "Existing Codebase Analysis" section of Design Doc +- **Reference representativeness check**: When adopting a pattern or dependency from nearby code, verify it is representative across the repository before adopting — nearby files alone are an insufficient basis + +## Debugging Techniques + +### 1. Error Analysis Procedure +1. Read error message (first line) accurately +2. Focus on first and last of stack trace +3. Identify first line where your code appears + +### 2. 5 Whys - Root Cause Analysis +``` +Example: +Symptom: Build error +Why1: Contract definitions don't match → Why2: Interface was updated +Why3: Dependency change → Why4: Package update impact +Why5: Major version upgrade with breaking changes +Root cause: Inappropriate version specification in dependency manifest +``` + +### 3. Minimal Reproduction Code +To isolate problems, attempt reproduction with minimal code: +- Remove unrelated parts +- Replace external dependencies with mocks +- Create minimal configuration that reproduces problem + +### 4. 
Debug Log Output +``` +Pattern: Structured logging with context +{ + context: 'operation-name', + input: { relevant, input, data }, + state: currentState, + timestamp: current_time_ISO8601 +} + +Key elements: +- Operation context (what is being executed) +- Input data (what was received) +- Current state (relevant state variables) +- Timestamp (for correlation) +``` + +## Quality Assurance Mechanism Awareness + +Before executing quality checks, identify what quality mechanisms exist for the change area: +- Primary detection: inspect the change area's file types, project manifest, and configuration to identify applicable quality tools + - Check CI pipeline definitions for checks that cover the affected paths + - Check for domain-specific linter or validator configurations (e.g., schema validators, API spec validators, configuration file linters) + - Check for domain-specific constraints in project configuration (naming rules, length limits, format requirements) +- Supplementary hint: IF task file specifies Quality Assurance Mechanisms → use them as additional hints for which domain-specific checks to look for +- Include discovered domain-specific checks alongside standard quality phases below + +## Quality Check Workflow + +Universal quality assurance phases applicable to all languages: + +### Phase 1: Static Analysis +1. **Code Style Checking**: Verify adherence to style guidelines +2. **Code Formatting**: Ensure consistent formatting +3. **Unused Code Detection**: Identify dead code and unused imports/variables +4. **Static Type Checking**: Verify type correctness (for statically typed languages) +5. **Static Analysis**: Detect potential bugs, security issues, code smells + +### Phase 2: Build Verification +1. **Compilation/Build**: Verify code builds successfully (for compiled languages) +2. **Dependency Resolution**: Ensure all dependencies are available and compatible +3. 
**Resource Validation**: Check configuration files, assets are valid + +### Phase 3: Testing +1. **Unit Tests**: Run all unit tests +2. **Integration Tests**: Run integration tests +3. **Test Coverage**: Measure and verify coverage meets standards +4. **E2E Tests**: Run end-to-end tests + +### Phase 4: Final Quality Gate +All checks must pass before proceeding: +- Zero static analysis errors +- Build succeeds +- All tests pass +- Coverage meets project-configured threshold + +### Quality Check Pattern (Language-Agnostic) +``` +Workflow: +1. Format check → 2. Lint/Style → 3. Static analysis → +4. Build/Compile → 5. Unit tests → 6. Coverage check → +7. Integration tests → 8. Final gate + +Auto-fix capabilities (when available): +- Format auto-fix +- Lint auto-fix +- Dependency/import organization +- Simple code smell corrections +``` + +## Situations Requiring Technical Decisions + +### Timing of Abstraction +- Extract patterns after writing concrete implementation 3 times +- Be conscious of YAGNI, implement only currently needed features +- Prioritize current simplicity over future extensibility + +### Performance vs Readability +- Prioritize readability unless profiling identifies a measurable bottleneck (e.g., response time exceeding SLA, memory exceeding allocation) +- Measure before optimizing +- Document reason with comments when optimizing + +### Granularity of Contracts and Interfaces +- Overly detailed contracts reduce maintainability +- Design interfaces where each method maps to a single domain operation and parameter types use domain vocabulary +- Use abstraction mechanisms to reduce duplication + +## Implementation Completeness Assurance + +### Impact Analysis: Mandatory 3-Stage Process + +Complete these stages sequentially before any implementation: + +**1. 
Discovery** - Identify all affected code: +- Implementation references (imports, calls, instantiations) +- Interface dependencies (contracts, types, data structures) +- Test coverage +- Configuration (build configs, env settings, feature flags) +- Documentation (comments, docs, diagrams) + +**2. Understanding** - Analyze each discovered location: +- Role and purpose in the system +- Dependency direction (consumer or provider) +- Data flow (origin → transformations → destination) +- Coupling strength + +**3. Identification** - Produce structured report: +``` +## Impact Analysis +### Direct Impact +- [Unit]: [Reason and modification needed] + +### Indirect Impact +- [System]: [Integration path → reason] + +### Data Flow +[Source] → [Transformation] → [Consumer] + +### Risk Assessment +- High: [Complex dependencies, fragile areas] +- Medium: [Moderate coupling, test gaps] +- Low: [Isolated, well-tested areas] + +### Implementation Order +1. [Start with lowest risk or deepest dependency] +2. [...] +``` + +**Critical**: Do not implement until all 3 stages are documented + +### Unused Code Deletion + +When unused code is detected: +- Will it be used in this work? Yes → Implement now | No → Delete now (Git preserves) +- Applies to: Code, tests, docs, configs, assets + +### Existing Code Modification + +``` +In use? No → Delete + Yes → Working? No → Delete + Reimplement + Yes → Fix/Extend +``` + +**Principle**: Prefer clean implementation over patching broken code \ No newline at end of file diff --git a/dev-workflows-frontend/skills/coding-principles/SKILL.md b/dev-workflows-frontend/skills/coding-principles/SKILL.md new file mode 100644 index 0000000..001ccdb --- /dev/null +++ b/dev-workflows-frontend/skills/coding-principles/SKILL.md @@ -0,0 +1,224 @@ +--- +name: coding-principles +description: Language-agnostic coding principles for maintainability, readability, and quality. Use when implementing features, refactoring code, or reviewing code quality. 
+--- + +# Language-Agnostic Coding Principles + +## Core Philosophy + +1. **Maintainability over Speed**: Prioritize long-term code health over initial development velocity +2. **Simplicity First**: Choose the simplest solution that meets requirements (YAGNI principle) +3. **Explicit over Implicit**: Make intentions clear through code structure and naming +4. **Delete over Comment**: Remove unused code instead of commenting it out + +## Code Quality + +### Continuous Improvement +- Refactor related code within each change set — address style, naming, or structure issues in the files being modified +- Improve code structure incrementally +- Keep the codebase lean and focused +- Delete unused code immediately + +### Readability +- Use meaningful, descriptive names drawn from the problem domain +- Use full words in names; abbreviations are acceptable only when widely recognized in the domain +- Use descriptive names; single-letter names are acceptable only for loop counters or well-known conventions (i, j, x, y) +- Extract magic numbers and strings into named constants +- Keep code self-documenting where possible + +## Function Design + +### Parameter Management +- **Recommended**: 0-2 parameters per function +- **For 3+ parameters**: Use objects, structs, or dictionaries to group related parameters +- **Example** (conceptual): + ``` + // Instead of: createUser(name, email, age, city, country) + // Use: createUser(userData) + ``` + +### Single Responsibility +- Each function should do one thing well +- Keep functions small and focused (typically < 50 lines) +- Extract complex logic into separate, well-named functions +- Functions should have a single level of abstraction + +### Function Organization +- Pure functions when possible (no side effects) +- Separate data transformation from side effects +- Use early returns to reduce nesting +- Keep nesting to a maximum of 3 levels; use early returns or extracted functions to flatten deeper nesting + +## Error Handling + 
+### Error Management Principles +- **Always handle errors**: Log with context or propagate explicitly +- **Log appropriately**: Include context for debugging +- **Protect sensitive data**: Mask or exclude passwords, tokens, PII from logs +- **Fail fast**: Detect and report errors as early as possible + +### Error Propagation +- Use language-appropriate error handling mechanisms +- Propagate errors to appropriate handling levels +- Provide meaningful error messages +- Include error context when re-throwing + +## Dependency Management + +### Loose Coupling via Parameterized Dependencies +- Inject external dependencies as parameters (constructor injection for classes, function parameters for procedural/functional code) +- Depend on abstractions, not concrete implementations +- Minimize inter-module dependencies +- Facilitate testing through mockable dependencies + +## Reference Representativeness + +### Verifying References Before Adoption +When adopting patterns, APIs, or dependencies from existing code: +- **IF** referencing only 2-3 nearby files → **THEN** confirm the pattern is representative by checking usage across the repository before adopting +- **IF** multiple approaches coexist in the repository → **THEN** identify the majority pattern and make a deliberate choice — selecting whichever is nearest is insufficient +- **IF** adopting an external dependency (library, plugin, SDK) → **THEN** verify repository-wide usage distribution for the same dependency; if the appropriate version cannot be determined from repository state alone, escalate +- **IF** following an existing pattern → **THEN** state the reason for following it when an alternative exists (e.g., consistency with surrounding code, avoiding breaking changes, pending coordinated update) + +### Principle +Nearby code is a starting point for investigation, not a sufficient basis for adoption. 
Verify that what you reference is representative of the repository's conventions and current best practices before using it as a model. + +## Performance Considerations + +### Optimization Approach +- **Measure first**: Profile before optimizing +- **Focus on algorithms**: Algorithmic complexity > micro-optimizations +- **Use appropriate data structures**: Choose based on access patterns +- **Resource management**: Handle memory, connections, and files properly + +### When to Optimize +- After identifying actual bottlenecks through profiling +- When performance issues are measurable +- Optimize only after measurable bottlenecks are identified, not during initial development + +## Code Organization + +### Structural Principles +- **Group related functionality**: Keep related code together +- **Separate concerns**: Domain logic, data access, presentation +- **Consistent naming**: Follow project conventions +- **Module cohesion**: High cohesion within modules, low coupling between + +### File Organization +- One primary responsibility per file +- Logical grouping of related functions/classes +- Clear folder structure reflecting architecture +- Avoid "god files" (files > 500 lines) + +## Commenting Principles + +### When to Comment +- **Document "what"**: Describe what the code does +- **Explain "why"**: Clarify reasoning behind decisions +- **Note limitations**: Document known constraints or edge cases +- **API documentation**: Public interfaces need clear documentation + +### Comment Scope +- Comment the "what" and "why"; the code itself communicates the "how" +- Record historical context in version control commit messages, not in comments +- Delete commented-out code (retrieve from git history when needed) +- Write comments that add information beyond what the code states + +### Comment Quality +- Write comments that remain accurate regardless of future code changes; avoid references to dates, versions, or temporary state +- Update comments when changing code +- Use 
proper grammar and formatting +- Write for future maintainers + +## Refactoring Approach + +### Safe Refactoring +- **Small steps**: Make one change at a time +- **Maintain working state**: Keep tests passing +- **Verify behavior**: Run tests after each change +- **Incremental improvement**: Don't aim for perfection immediately + +### Refactoring Triggers +- Code duplication (DRY principle) +- Functions > 50 lines +- Complex conditional logic +- Unclear naming or structure + +## Testing Considerations + +### Testability +- Write testable code from the start +- Avoid hidden dependencies +- Keep side effects explicit +- Design for parameterized dependencies + +### Test-Driven Development +- Write tests before implementation when appropriate +- Keep tests simple and focused +- Test behavior, not implementation +- Maintain test quality equal to production code + +## Security Principles + +### Secure Defaults +- Store credentials and secrets through environment variables or dedicated secret managers +- Use parameterized queries (prepared statements) for all database access +- Use established cryptographic libraries provided by the language or framework +- Generate security-critical values (tokens, IDs, nonces) with cryptographically secure random generators +- Encrypt sensitive data at rest and in transit using standard protocols + +### Input and Output Boundaries +- Validate all external input at system entry points for expected format, type, and length +- Encode output appropriately for its rendering context (HTML, SQL, shell, URL) +- Return only information necessary for the caller in error responses; log detailed diagnostics server-side + +### Access Control +- Apply authentication to all entry points that handle user data or trigger state changes +- Verify authorization for each resource access, not only at the entry point +- Grant only the permissions required for the operation (files, database connections, API scopes) + +### Knowledge Cutoff Supplement (2026-03) 
+- OWASP Top 10:2025 shifted from symptoms to root causes; added "Software Supply Chain Failures" (A03) and "Mishandling of Exceptional Conditions" (A10) +- Recent research indicates AI-generated code shows elevated rates of access control gaps — treat authentication and authorization as high-priority review targets +- OpenSSF published "Security-Focused Guide for AI Code Assistant Instructions" — recommends language-specific, actionable constraints over generic advice +- For detailed detection patterns, see `references/security-checks.md` + +## Documentation + +### Code Documentation +- Document public APIs and interfaces +- Include usage examples for complex functionality +- Maintain README files for modules +- Update documentation in the same commit that changes the corresponding behavior + +### Architecture Documentation +- Document high-level design decisions +- Explain integration points +- Clarify data flows and boundaries +- Record trade-offs and alternatives considered + +## Version Control Practices + +### Commit Practices +- Make atomic, focused commits +- Write clear, descriptive commit messages +- Commit working code (passes tests) +- Commit only production-ready code; store secrets in environment variables or secret managers + +### Code Review Readiness +- Self-review before requesting review +- Keep changes focused and reviewable +- Provide context in pull request descriptions +- Respond to feedback constructively + +## Language-Specific Adaptations + +While these principles are language-agnostic, adapt them to your specific programming language: + +- **Static typing**: Use strong types when available +- **Dynamic typing**: Add runtime validation +- **OOP languages**: Apply SOLID principles +- **Functional languages**: Prefer pure functions and immutability +- **Concurrency**: Follow language-specific patterns for thread safety + diff --git a/dev-workflows-frontend/skills/coding-principles/references/security-checks.md 
b/dev-workflows-frontend/skills/coding-principles/references/security-checks.md new file mode 100644 index 0000000..66b71aa --- /dev/null +++ b/dev-workflows-frontend/skills/coding-principles/references/security-checks.md @@ -0,0 +1,64 @@ +# Security Check Patterns + +Last reviewed: 2026-03-21 + +## Stable Patterns + +These patterns have low false-positive rates and are detectable through grep or static analysis. + +### Hardcoded Secrets +- Credentials, API keys, or tokens assigned as string literals in source code +- Connection strings containing embedded passwords +- Private keys or certificates stored in source files +- Detection approach: search for high-entropy strings near assignment operators, common key names (`password`, `secret`, `api_key`, `token`, `private_key`), and platform-specific token formats + +### SQL String Concatenation +- SQL statements constructed through string concatenation or interpolation with variables +- Detection approach: search for SQL keywords (`SELECT`, `INSERT`, `UPDATE`, `DELETE`) combined with string concatenation operators or string interpolation containing variable references + +### Dynamic Code Execution +- Use of dynamic code execution functions (e.g., `eval`, `exec`) with non-static input +- Dynamic module loading with variable paths +- Detection approach: search for dynamic code execution or module loading calls where the argument is not a static literal + +### Insecure Deserialization +- Deserialization of untrusted input using unsafe loaders or formats that allow arbitrary object construction (e.g., native serialization, YAML without safe loader) +- Parsed data passed directly into dynamic code execution +- Detection approach: search for deserialization calls that accept external input without safe loader or type-restricted configuration + +### Path Traversal +- File system paths constructed from user-supplied input without sanitization +- Patterns where request parameters flow into file read/write operations +- 
Detection approach: search for file operations where path arguments include request parameters, query strings, or user input variables + +### CORS Wildcard +- `Access-Control-Allow-Origin` set to `*` in production configuration +- CORS middleware configured with wildcard origin +- Detection approach: search for CORS configuration with wildcard values + +### Non-TLS URLs +- HTTP (non-TLS) URLs embedded in source code for production endpoints (outside configuration files, tests, and documentation) +- Detection approach: search for `http://` patterns in source files, excluding localhost, configuration files, tests, and documentation + +## Trend-Sensitive Patterns + +Updated: 2026-03-21 +Sources: OWASP Top 10:2025, DryRun Agentic Coding Security Report (2026-03) + +### Access Control Gaps in AI-Generated Code +- Endpoints or route handlers defined without authentication middleware +- Resource access operations (read, update, delete) without authorization verification +- Administrative or destructive operations accessible without elevated permissions +- AI-generated code frequently omits authentication middleware and authorization checks — flag every route handler and resource access operation for explicit verification during review +- Detection approach: search for route/endpoint handler definitions that lack authentication middleware, and resource operations (read, update, delete) without authorization checks in the call chain + +### Mishandling of Exceptional Conditions (OWASP A10:2025) +- Error handlers that expose internal system details (stack traces, database errors, file paths) in responses +- Error handlers that grant access, skip authentication, or bypass authorization when an exception occurs (fail-open behavior) +- Missing error handling on security-critical operations (authentication, authorization, cryptographic operations) +- Detection approach: search for catch/error handler blocks that return stack traces, database error messages, or file paths in 
responses; search for catch blocks that call next() or return success without re-validating security state + +### Software Supply Chain Patterns (OWASP A03:2025) +- Dependencies imported without version pinning +- Use of deprecated or unmaintained packages for security-critical functions +- Detection approach: check dependency manifests for unpinned versions and known deprecated packages diff --git a/dev-workflows-frontend/skills/documentation-criteria/SKILL.md b/dev-workflows-frontend/skills/documentation-criteria/SKILL.md new file mode 100644 index 0000000..9404392 --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/SKILL.md @@ -0,0 +1,236 @@ +--- +name: documentation-criteria +description: Documentation creation criteria including PRD, ADR, Design Doc, and Work Plan requirements with templates. Use when creating or reviewing technical documents, or determining which documents are required. +--- + +# Documentation Creation Criteria + +## Templates + +- **[prd-template.md](references/prd-template.md)** - Product Requirements Document template +- **[adr-template.md](references/adr-template.md)** - Architecture Decision Record template +- **[ui-spec-template.md](references/ui-spec-template.md)** - UI Specification template (frontend/fullstack features) +- **[design-template.md](references/design-template.md)** - Technical Design Document template +- **[plan-template.md](references/plan-template.md)** - Work Plan template +- **[task-template.md](references/task-template.md)** - Task file template for implementation tasks + +## Creation Decision Matrix + +| Condition | Required Documents | Creation Order | +|-----------|-------------------|----------------| +| New Feature Addition (backend) | PRD → [ADR] → Design Doc → Work Plan | After PRD approval | +| New Feature Addition (frontend/fullstack) | PRD → **UI Spec** → [ADR] → Design Doc → Work Plan | UI Spec before Design Doc | +| ADR Conditions Met (see below) | ADR → Design Doc → Work Plan | 
Start immediately | +| 6+ Files | ADR → Design Doc → Work Plan (Required) | Start immediately | +| 3-5 Files | Design Doc → Work Plan (Recommended) | Start immediately | +| 1-2 Files | None | Direct implementation | + +## ADR Creation Conditions (Required if Any Apply) + +### 1. Contract System Changes +- **Adding nested contracts with 3+ levels**: `Contract A { Contract B { Contract C { field: T } } }` + - Rationale: Deep nesting has high complexity and wide impact scope +- **Changing/deleting contracts used in 3+ locations** + - Rationale: Multiple location impacts require careful consideration +- **Contract responsibility changes** (e.g., DTO→Entity, Request→Domain) + - Rationale: Conceptual model changes affect design philosophy + +### 2. Data Flow Changes +- **Storage location changes** (DB→File, Memory→Cache) +- **Processing order changes with 3+ steps** + - Example: "Input→Validation→Save" to "Input→Save→Async Validation" +- **Data passing method changes** (parameter passing→shared state, direct reference→event-based communication) + +### 3. Architecture Changes +- Layer addition, responsibility changes, component relocation + +### 4. External Dependency Changes +- Library/framework/external API introduction or replacement + +### 5. Complex Implementation Logic (Regardless of Scale) +- Managing 3+ states +- Coordinating 5+ asynchronous processes + +## Detailed Document Definitions + +### PRD (Product Requirements Document) + +**Purpose**: Define business requirements and user value + +**Includes**: +- Business requirements and user value +- Success metrics and KPIs (each metric specifies a numeric target and measurement method) +- User stories and use cases +- MoSCoW prioritization (Must/Should/Could/Won't) +- Acceptance criteria with sequential IDs (AC-001, AC-002, ...) 
for downstream traceability +- MVP and Future phase separation +- User journey diagram (required) +- Scope boundary diagram (required) + +**Scope**: Business requirements, user value, success metrics, user stories, and prioritization only. Implementation details belong in Design Doc, technical selection rationale in ADR, phases and task breakdown in Work Plan. + +### ADR (Architecture Decision Record) + +**Purpose**: Record technical decision rationale and background + +**Includes**: +- Decision (what was selected) +- Rationale (why that selection was made) +- Option comparison (minimum 3 options) and trade-offs +- Architecture impact +- Principled implementation guidelines (e.g., "Use dependency injection") + +**Scope**: Decision, rationale, option comparison, architecture impact, and principled guidelines only. Implementation procedures and code examples belong in Design Doc, schedule and resource assignments in Work Plan. + +### UI Specification + +**Purpose**: Define UI structure, screen transitions, component decomposition, and interaction design for frontend features + +**Includes**: +- Screen list and transition conditions +- Component decomposition with state x display matrix (default/loading/empty/error/partial) +- Interaction definitions linked to PRD acceptance criteria (EARS format) +- Prototype management (code-based prototypes as attachments, not source of truth) +- AC traceability from PRD to screens/components +- Existing component reuse map and design tokens +- Visual acceptance criteria (golden states, layout constraints) +- Accessibility requirements (keyboard, screen reader, contrast) + +**Scope**: Screen structure, transitions, component decomposition, interaction design, and visual acceptance criteria only. Technical implementation and API contracts belong in Design Doc, test implementation in acceptance-test-generator skeletons, schedule in Work Plan. 
+ +**Required Structural Elements**: +- At least one component with state x display matrix and interaction table +- AC traceability table mapping PRD ACs to screens/states +- Screen list with transition conditions +- Existing component reuse map (reuse/extend/new decisions) + +**Prototype Code Handling**: +- Prototype code provided by user is placed in `docs/ui-spec/assets/{feature-name}/` +- Prototype is an attachment to UI Spec, never the source of truth +- UI Spec + Design Doc are the canonical specifications + +### Design Document + +**Purpose**: Define technical implementation methods in detail + +**Includes**: +- **Existing codebase analysis** (required) + - Implementation path mapping (both existing and new) + - Integration point clarification (connection points with existing code even for new implementations) +- Technical implementation approach (vertical/horizontal/hybrid) +- **Technical dependencies and implementation constraints** (required implementation order) +- Interface and contract definitions +- Data flow and component design +- **Acceptance criteria (each criterion specifies a verifiable condition with pass/fail threshold)** +- Change impact map (clearly specify direct impact/indirect impact/no ripple effect) +- Complete enumeration of integration points +- Data contract clarification +- **Agreement checklist** (agreements with stakeholders) +- **Code inspection evidence** (inspected files/functions during investigation) +- **Field propagation map** (when fields cross component boundaries) +- **Data representation decision** (when introducing new structures) +- **Applicable standards** (explicit/implicit classification) +- **Prerequisite ADRs** (including common ADRs) +- **Verification Strategy** (required) + - Correctness proof method (what "correct" means for this change, how it's verified, when) + - Early verification point (first target to prove the approach works, success criteria, failure response) + +**Required Structural Elements**: 
+```yaml +Change Impact Map: + Change Target: [Component/Feature] + Direct Impact: [Files/Functions] + Indirect Impact: [Data format/Processing time] + No Ripple Effect: [Unaffected features] + +Interface Change Matrix: + Existing: [Function/method/operation name] + New: [Function/method/operation name] + Conversion Required: [Yes/No] + Compatibility Method: [Approach] +``` + +**Scope**: Technical implementation methods, interfaces, data flow, acceptance criteria, and verification strategy only. Technology selection rationale belongs in ADR, schedule and assignments in Work Plan. + +### Work Plan + +**Purpose**: Implementation task management and progress tracking + +**Includes**: +- Task breakdown and dependencies (maximum 2 levels) +- Schedule and duration estimates +- **Include test skeleton file paths from acceptance-test-generator** (integration and E2E) +- **Verification Strategy summary** (extracted from Design Doc) +- **Final Quality Assurance Phase (required)** +- Progress records (checkbox format) + +**Scope**: Task breakdown, dependencies, schedule, verification strategy summary, and progress tracking only. Technical rationale belongs in ADR, design details in Design Doc. + +**Phase Division Criteria** (adapt to implementation approach from Design Doc): + +**When Vertical Slice selected**: +- Each phase = one value unit (feature, component, or migration target) +- Each phase includes its own implementation + verification per Verification Strategy + +**When Horizontal Slice selected**: +1. **Phase 1: Foundation Implementation** - Contract definitions, interfaces/signatures, test preparation +2. **Phase 2: Core Feature Implementation** - Business logic, unit tests +3. 
**Phase 3: Integration Implementation** - External connections, presentation layer + +**When Hybrid selected**: +- Combine vertical and horizontal as defined in Design Doc implementation approach + +**All approaches**: Final phase is always Quality Assurance (acceptance criteria achievement, all tests passing, quality checks). Each phase's verification method follows Verification Strategy from Design Doc. + +**Three Elements of Task Completion Definition**: +1. **Implementation Complete**: Code is functional +2. **Quality Complete**: Tests, static checks, linting pass +3. **Integration Complete**: Verified connection with other components + +## Creation Process + +1. **Problem Analysis**: Change scale assessment, ADR condition check + - Identify explicit and implicit project standards before investigation +2. **ADR Option Consideration** (ADR only): Compare 3+ options, specify trade-offs +3. **Creation**: Use templates, include measurable conditions +4. **Approval**: "Accepted" after review enables implementation + +## Storage Locations + +| Document | Path | Naming Convention | Template | +|----------|------|------------------|----------| +| PRD | `docs/prd/` | `[feature-name]-prd.md` | [prd-template.md](references/prd-template.md) | +| ADR | `docs/adr/` | `ADR-[4-digits]-[title].md` | [adr-template.md](references/adr-template.md) | +| UI Spec | `docs/ui-spec/` | `[feature-name]-ui-spec.md` | [ui-spec-template.md](references/ui-spec-template.md) | +| UI Spec Assets | `docs/ui-spec/assets/{feature-name}/` | Prototype code files | - | +| Design Doc | `docs/design/` | `[feature-name]-design.md` | [design-template.md](references/design-template.md) | +| Work Plan | `docs/plans/` | `YYYYMMDD-{type}-{description}.md` | [plan-template.md](references/plan-template.md) | +| Task File | `docs/plans/tasks/` | `{plan-name}-task-{number}.md` | [task-template.md](references/task-template.md) | + +*Note: Work plans are excluded by `.gitignore` + +## ADR Status +`Proposed` → 
`Accepted` → `Deprecated`/`Superseded`/`Rejected` + +## AI Automation Rules +- 5+ files: Suggest ADR creation +- Contract/data flow change detected: ADR mandatory +- Check existing ADRs before implementation + +## Diagram Requirements + +Required diagrams for each document (using mermaid notation): + +| Document | Required Diagrams | Purpose | +|----------|------------------|---------| +| PRD | User journey diagram, Scope boundary diagram | Clarify user experience and scope | +| ADR | Option comparison diagram (when needed) | Visualize trade-offs | +| UI Spec | Screen transition diagram, Component tree diagram | Clarify screen flow and component structure | +| Design Doc | Architecture diagram, Data flow diagram | Understand technical structure | +| Work Plan | Phase structure diagram, Task dependency diagram | Clarify implementation order | + +## Common ADR Relationships +1. **At creation**: Identify common technical areas (logging, error handling, async processing, etc.), reference existing common ADRs +2. **When missing**: Consider creating necessary common ADRs +3. **Design Doc**: Specify common ADRs in "Prerequisite ADRs" section +4. **Compliance check**: Verify design aligns with common ADR decisions \ No newline at end of file diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/adr-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/adr-template.md new file mode 100644 index 0000000..2f7f490 --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/references/adr-template.md @@ -0,0 +1,68 @@ +# [ADR Number] [Title] + +## Status + +[Proposed | Accepted | Deprecated | Superseded | Rejected] + +## Context + +[Describe the background and reasons why this decision is needed. Include the essence of the problem, current challenges, and constraints] + +## Decision + +[Describe the actual decision made. 
Aim for specific and clear descriptions] + +### Decision Details + +| Item | Content | +|------|---------| +| **Decision** | [The decision in one sentence] | +| **Why now** | [Why this needs to happen now (timing rationale)] | +| **Why this** | [Why this option over alternatives (1-3 lines)] | +| **Known unknowns** | [At least one uncertainty at this point] | +| **Kill criteria** | [One signal that should trigger reversal of this decision] | + +## Rationale + +[Explain why this decision was made and why it is the best option compared to alternatives] + +### Options Considered + +1. **Option 1**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +2. **Option 2**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +3. **Option 3 (Selected)**: [Description] + - Pros: [List advantages] + - Cons: [List disadvantages] + +## Consequences + +### Positive Consequences + +- [List positive impacts on the project or system] + +### Negative Consequences + +- [List negative impacts or trade-offs that need to be accepted] + +### Neutral Consequences + +- [List changes that are neither good nor bad] + +## Architecture Impact + +[Describe how this decision affects existing architecture: (1) components that change, (2) new dependencies introduced, (3) architectural constraints added or removed] + +## Implementation Guidance + +[Principled direction only. Implementation procedures go to Design Doc] +Example: "Use dependency injection" ✓, "Implement in Phase 1" ✗ + +## Related Information + +- [Links to related ADRs, documents, issues, PRs, etc.] 
diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/design-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/design-template.md new file mode 100644 index 0000000..28e9c5b --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/references/design-template.md @@ -0,0 +1,388 @@ +# [Feature Name] Design Document + +## Overview + +[Explain the purpose and overview of this feature in 2-3 sentences] + +### Referenced UI Spec (when feature includes frontend) +- UI Spec path: [docs/ui-spec/xxx-ui-spec.md] +- Component structure and state design are inherited from UI Spec + +## Design Summary (Meta) + +```yaml +design_type: "new_feature|extension|refactoring" +risk_level: "low|medium|high" +complexity_level: "low|medium|high" +complexity_rationale: "[Required if medium/high: (1) which requirements/ACs necessitate this complexity, (2) which constraints/risks it addresses]" +main_constraints: + - "[constraint 1]" + - "[constraint 2]" +biggest_risks: + - "[risk 1]" + - "[risk 2]" +unknowns: + - "[uncertainty 1]" + - "[uncertainty 2]" +``` + +## Background and Context + +### Prerequisite ADRs + +- [ADR File Name]: [Related decision items] +- Reference common technical ADRs when applicable + +### Agreement Checklist + +#### Scope +- [ ] [Features/components to change] +- [ ] [Features to add] + +#### Non-Scope (Explicitly not changing) +- [ ] [Features/components not to change] +- [ ] [Existing logic to preserve] + +#### Constraints +- [ ] Parallel operation: [Yes/No] +- [ ] Backward compatibility: [Required/Not required] +- [ ] Performance measurement: [Required/Not required] + +#### Applicable Standards +- [ ] [Standard/convention] `[explicit]` - Source: [config / rule file / documentation path] +- [ ] [Observed pattern] `[implicit]` - Evidence: [file paths] - Confirmed: [Yes/No] + +#### Quality Assurance Mechanisms +How quality is enforced in the change area. 
Each item is either adopted (will be enforced during implementation) or noted (observed but not adopted, with reason). + +- [ ] [Tool/check name] — Enforces: [what] — Config: [path] — Covers: [file paths/patterns, or "project-wide"] — Status: `adopted` / `noted (reason)` +- [ ] [Domain-specific constraint] — Enforces: [what] — Source: [path] — Covers: [file paths/patterns, or "project-wide"] — Status: `adopted` / `noted (reason)` + +### Problem to Solve + +[Specific problems or challenges this feature aims to address] + +### Current Challenges + +[Current system issues or limitations] + +### Requirements + +#### Functional Requirements + +- [List mandatory functional requirements] + +#### Non-Functional Requirements + +- **Performance**: [Response time, throughput requirements] +- **Scalability**: [Requirements for handling increased load] +- **Reliability**: [Error rate, availability requirements] +- **Maintainability**: [Code readability and changeability] + +## Acceptance Criteria (AC) - EARS Format + +Each AC is written in EARS (Easy Approach to Requirements Syntax) format. +Keywords determine test type and reduce ambiguity. 
+
+**EARS Keywords**:
+| Keyword | Usage | Test Type |
+|---------|-------|-----------|
+| **When** | Event-triggered behavior | Event-driven test |
+| **While** | State-dependent behavior | State condition test |
+| **If-then** | Conditional behavior | Branch coverage test |
+| (none) | Ubiquitous behavior | Basic functionality test |
+
+**Format**: `[Keyword] <trigger/condition>, the system shall <system response>`
+
+### [Functional Requirement 1]
+
+- [ ] **When** user clicks login button with valid credentials, the system shall authenticate and redirect to dashboard
+- [ ] **If** credentials are invalid, **then** the system shall display error message "Invalid credentials"
+- [ ] **While** user is logged in, the system shall maintain the session for configured timeout period
+
+### [Functional Requirement 2]
+
+- [ ] The system shall display data list with pagination of 10 items per page
+- [ ] **When** input is entered in search field, the system shall apply real-time filtering
+
+## Existing Codebase Analysis
+
+### Implementation Path Mapping
+| Type | Path | Description |
+|------|------|-------------|
+| Existing | src/[actual-path] | [Current implementation] |
+| New | src/[planned-path] | [Planned new creation] |
+
+### Integration Points (Include even for new implementations)
+- **Integration Target**: [What to connect with]
+- **Invocation Method**: [How it will be invoked]
+
+### Code Inspection Evidence
+
+| File/Function | Relevance |
+|---------------|-----------|
+| [path:function] | [similar functionality / integration point / pattern reference] |
+
+### Fact Disposition Table
+
+One row per codebase analysis `focusAreas` entry. This table is the single binding between existing-behavior facts and the design — other sections that describe existing behavior reference the row by Focus Area name. 
+ +| Fact ID | Focus Area | Disposition | Rationale | Evidence | +|---------|------------|-------------|-----------|----------| +| [fact_id from focusAreas] | [area name from focusAreas] | preserve / transform / remove / out-of-scope | [for transform: state new outcome; for remove: state reason; for out-of-scope: state which scope boundary excludes it; for preserve: brief confirmation] | [evidence value carried verbatim from focusAreas] | + +## Design + +### Change Impact Map + +```yaml +Change Target: [Component/feature to change] +Direct Impact: + - [Files/functions requiring direct changes] + - [Interface change points] +Indirect Impact: + - [Data format changes] + - [Processing time changes] +No Ripple Effect: + - [Explicitly specify unaffected features] +``` + +### Interface Change Matrix + +| Existing | New | Conversion Required | Compatibility Method | +|----------|-----|--------------------|--------------------| +| [Function/method/operation name] | [Function/method/operation name] | [Yes/No] | [Approach: adapter, wrapper, deprecation, etc.] | + +### Architecture Overview + +[How this feature is positioned within the overall system] + +### Data Flow + +``` +[Express data flow using diagrams or pseudo-code] +``` + +### Integration Points List + +| Integration Point | Location | Old Implementation | New Implementation | Switching Method | Verification Method | +|-------------------|----------|-------------------|-------------------|------------------|-------------------| +| Integration Point 1 | [Class/Function] | [Existing Process] | [New Process] | [DI/Factory etc.] 
| [How to verify this switching works] | +| Integration Point 2 | [Another Location] | [Existing] | [New] | [Method] | [Verification approach] | + +### Main Components + +#### Component 1 + +- **Responsibility**: [Scope of responsibility for this component] +- **Interface**: [APIs and contract definitions provided] +- **Dependencies**: [Relationships with other components] + +#### Component 2 + +- **Responsibility**: [Scope of responsibility for this component] +- **Interface**: [APIs and contract definitions provided] +- **Dependencies**: [Relationships with other components] + +### Data Representation Decision (When Introducing New Structures) + +| Criterion | Assessment | Reason | +|-----------|-----------|--------| +| Semantic Fit | [Yes/No] | [Does existing structure's meaning align?] | +| Responsibility Fit | [Yes/No] | [Same bounded context?] | +| Lifecycle Fit | [Yes/No] | [Same creation/mutation/deletion timing?] | +| Boundary/Interop Cost | [Low/Medium/High] | [Cost of sharing across boundaries?] 
| + +**Decision**: [reuse / extend / new] — [rationale in 1-2 sentences] + +### Contract Definitions + +``` +// Record major contract/interface definitions here +``` + +### Data Contract + +#### Component 1 + +```yaml +Input: + Type: [Data shape, contract, or schema] + Preconditions: [Required items, format constraints] + Validation: [Validation method] + +Output: + Type: [Data shape, contract, or schema] + Guarantees: [Conditions that must always be met] + On Error: [Exception/null/default value] + +Invariants: + - [Conditions that remain unchanged before and after processing] +``` + +### Field Propagation Map (When Fields Cross Boundaries) + +| Field | Boundary | Status | Detail | +|-------|----------|--------|--------| +| [field name] | [Component A → B] | preserved / transformed / dropped | [logic or reason] | + +### State Transitions and Invariants (When Applicable) + +```yaml +State Definition: + - Initial State: [Initial values and conditions] + - Possible States: [List of states] + +State Transitions: + Current State → Event → Next State + +System Invariants: + - [Conditions that hold in any state] +``` + +### UI Error State Design (when feature includes frontend) + +| Component / Screen | Loading | Empty | Error | Partial | +|-------------------|---------|-------|-------|---------| +| [Component name] | [Skeleton / spinner] | [Empty state + CTA] | [Error message + Retry] | [Cached display + Banner] | + +### Client State Design (when feature includes frontend) + +| State Category | State | Management Method | Sync Strategy | +|---------------|-------|-------------------|---------------| +| Server state | [Fetched data] | [Cache library / custom hook] | [Polling / WebSocket / manual refresh] | +| Local UI state | [Modal open, tab selection] | [useState / useReducer] | - | +| Temporary state | [Form input, draft] | [useState / form library] | [Auto-save / manual save] | + +### UI Action - API Contract Mapping (when feature includes frontend) + +| UI Action | 
API Endpoint | Request | Response | Error Contract | +|-----------|-------------|---------|----------|----------------| +| [Button click / form submit] | [POST /api/xxx] | [Request body fields] | [Response fields] | [Error codes and UI handling] | + +### Error Handling + +| Error Category | Example | Detection | Recovery Strategy | User Impact | +|---------------|---------|-----------|-------------------|-------------| +| [Validation / External / Infrastructure / Business logic] | [Specific error] | [How detected] | [Retry / Fallback / Propagate / Log-and-continue] | [User-facing message or silent handling] | + +### Logging and Monitoring + +- **Log events**: [Key events to log: state transitions, external calls, error occurrences, performance thresholds] +- **Log levels**: [Which events at DEBUG/INFO/WARN/ERROR] +- **Sensitive data**: [Fields to mask or exclude — coordinate with Security Considerations] +- **Monitoring**: [Metrics to track, alert thresholds, dashboard requirements] + +## Implementation Plan + +### Implementation Approach + +**Selected Approach**: [Approach name or combination] +**Selection Reason**: [Reason considering project constraints and technical dependencies] + +### Technical Dependencies and Implementation Order + +#### Required Implementation Order +1. **[Component/Feature A]** + - Technical Reason: [Why this needs to be implemented first] + - Dependent Elements: [Other components that depend on this] + +2. **[Component/Feature B]** + - Technical Reason: [Technical necessity to implement after A] + - Prerequisites: [Required pre-implementations] + +### Migration Strategy + +[Technical migration approach, ensuring backward compatibility] + +## Security Considerations + +Evaluate the following for this feature's trust boundaries and data flow: + +- **Authentication & Authorization**: What authentication is required for new entry points? What authorization checks protect resource access? 
+- **Input Validation**: Where does external input enter the system? How is it validated before processing? +- **Sensitive Data Handling**: What data requires protection (encryption, masking, access control)? What data is safe to include in logs and error responses? + +Mark items as N/A with brief rationale when the feature has no relevant trust boundary. + +## Test Boundaries + +### Mock Boundary Decisions + +| Component/Dependency | Mock? | Rationale | +|---------------------|-------|-----------| +| [External API / DB / File system / etc.] | [Yes/No] | [Why this boundary was chosen] | + +### Data Layer Testing Strategy + +- **Schema dependencies**: [List tables/models this feature reads from or writes to, with paths to their definitions] +- **Test data approach**: [How test data is provided — fixtures, factories, seed scripts, or real database] +- **Mock limitations acknowledged**: [What cannot be reliably tested with mocks alone for this feature] + +Mark as N/A with brief rationale when the feature has no data layer dependencies. + +### Integration Verification Points + +- [List critical integration points that require testing beyond unit-level mocks] + +## Verification Strategy + +Verification Strategy defines what correctness means and how to prove it at design time. L1/L2/L3 (from implementation-approach skill) define completion verification granularity at task execution time. + +### Correctness Proof Method + +How will this change's correctness be demonstrated? 
+ +- **Correctness definition**: [What "correct" means for this change — e.g., "output matches existing behavior", "all ACs pass in production-equivalent environment", "generated queries execute without error on target DB"] +- **Verification method**: [Specific technique — e.g., "compare new implementation output against existing implementation", "run against staging DB", "contract test with real API"] +- **Verification timing**: [When verification occurs — e.g., "after first vertical slice", "per repository", "at integration phase"] + +### Early Verification Point + +What is verified first, and how, to confirm the approach is correct before scaling? + +- **First verification target**: [The smallest unit that proves the approach works — e.g., "first repository migration", "single API endpoint", "one screen flow"] +- **Success criteria**: [Observable outcome — e.g., "CSV download produces identical output to legacy", "API returns 200 with expected schema"] +- **Failure response**: [What to do if early verification fails — e.g., "reassess approach before proceeding", "escalate to user"] + +### Output Comparison (When Replacing or Modifying Existing Behavior) + +How will behavioral equivalence be verified between existing and new implementation? + +- **Comparison input**: [Identical input used for both implementations — e.g., "same DB snapshot", "same API request payload"] +- **Expected output fields**: [Specific fields/columns to compare — e.g., "all output columns", "response body fields: id, status, amount"] +- **Diff method**: [How to compare — e.g., "file-level diff", "JSON field-by-field comparison", "row count + spot check"] +- **Transformation pipeline coverage**: [Each step from codebase analysis `dataTransformationPipelines` and what the comparison covers] + +Mark as N/A with brief rationale when the design introduces entirely new behavior with no existing equivalent. 
+ +## Future Extensibility + +- **Extension points**: [Interfaces, hooks, or plugin mechanisms designed for future use] +- **Known future requirements**: [Planned features that influenced current design decisions] +- **Intentional limitations**: [What was deliberately kept simple and why] + +## Alternative Solutions + +### Alternative 1 + +- **Overview**: [Description of alternative solution] +- **Advantages**: [Advantages] +- **Disadvantages**: [Disadvantages] +- **Reason for Rejection**: [Why it wasn't adopted] + +## Risks and Mitigation + +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| [Risk 1] | High/Medium/Low | High/Medium/Low | [Countermeasure] | + +## References + +- [Related documentation and links] + +## Update History + +| Date | Version | Changes | Author | +|------|---------|---------|--------| +| YYYY-MM-DD | 1.0 | Initial version | [Name] | diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/plan-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/plan-template.md new file mode 100644 index 0000000..1120318 --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/references/plan-template.md @@ -0,0 +1,192 @@ +# Work Plan: [Feature Name] Implementation + +Created Date: YYYY-MM-DD +Type: feature|fix|refactor +Estimated Duration: X days +Estimated Impact: X files +Related Issue/PR: #XXX (if any) + +## Related Documents +- Design Doc(s): + - [docs/design/XXX.md] + - [docs/design/YYY.md] (if multiple, e.g. 
backend + frontend) +- ADR: [docs/adr/ADR-XXXX.md] (if any) +- PRD: [docs/prd/XXX.md] (if any) + +## Verification Strategy (from Design Doc) + +### Correctness Proof Method +- **Correctness definition**: [extracted from Design Doc] +- **Verification method**: [extracted from Design Doc] +- **Verification timing**: [extracted from Design Doc] + +### Early Verification Point +- **First verification target**: [extracted from Design Doc] +- **Success criteria**: [extracted from Design Doc] +- **Failure response**: [extracted from Design Doc] + +## Quality Assurance Mechanisms (from Design Doc) + +Adopted quality gates for the change area. Each task in this plan must satisfy these mechanisms. + +| Mechanism | Enforces | Config Location | Covered Files | +|-----------|----------|-----------------|---------------| +| [Tool/check name] | [What quality aspect it enforces] | [path/to/config] | [file paths or patterns covered, or "project-wide"] | +| [Domain constraint] | [What it enforces] | [path/to/source] | [file paths or patterns covered, or "project-wide"] | + +## Design-to-Plan Traceability + +Maps each Design Doc technical requirement to the covering task(s). One row per extracted item. Every row must have at least one covering task, or an explicit gap justification. 
+ +| DD Section | DD Item | Category | Covered By Task(s) | Gap Status | Notes | +|---|---|---|---|---|---| +| [Section name from DD] | [Specific item] | impl-target / connection-switching / contract-change / verification / prerequisite | [Phase X Task Y] | covered | | + +**Category values**: `impl-target` (implementation target), `connection-switching` (connection/switching/registration), `contract-change` (contract change and propagation), `verification` (verification requirement), `prerequisite` (prerequisite work) + +**Gap Status values**: `covered` (task exists), `gap` (no task — requires justification in Notes, user confirmation required before plan approval) + +## Objective +[Why this change is necessary, what problem it solves] + +## Background +[Current state and why changes are needed] + +## Risks and Countermeasures + +### Technical Risks +- **Risk**: [Risk description] + - **Impact**: [Impact assessment] + - **Countermeasure**: [How to address it] + +### Schedule Risks +- **Risk**: [Risk description] + - **Impact**: [Impact assessment] + - **Countermeasure**: [How to address it] + +## Implementation Phases + +Select ONE phase structure based on implementation approach from Design Doc. +See documentation-criteria skill for detailed Phase Division Criteria. +All quality checks follow Quality Check Workflow from ai-development-guide skill. + +### Option A: Vertical Slice Phase Structure + +Use when implementation approach is Vertical Slice. Each phase = one value unit with verification. 
+ +### Phase 1: [Value Unit 1 Name] (Estimated commits: X) +**Purpose**: [First vertical slice — proves approach works] +**Verification**: [From Verification Strategy: early verification point] + +#### Tasks +- [ ] Task 1: Implementation +- [ ] Task 2: Verification per Verification Strategy +- [ ] Quality check (staged) + +#### Phase Completion Criteria +- [ ] Early verification point passed +- [ ] [Functional criteria] + +### Phase 2: [Value Unit 2 Name] (Estimated commits: X) +**Purpose**: [Subsequent value unit] +**Verification**: [From Verification Strategy] + +#### Tasks +- [ ] Task 1: Implementation +- [ ] Task 2: Verification per Verification Strategy +- [ ] Quality check + +#### Phase Completion Criteria +- [ ] [Functional criteria] +- [ ] [Quality criteria] + +### Option B: Horizontal Slice Phase Structure + +Use when implementation approach is Horizontal Slice. Phases follow Foundation → Core → Integration → QA. + +### Phase 1: [Foundation] (Estimated commits: X) +**Purpose**: Contract definitions, interfaces, test preparation + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check (staged) +- [ ] Unit tests: All related tests pass + +#### Phase Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Phase 2: [Core Feature] (Estimated commits: X) +**Purpose**: Business logic, unit tests + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check (staged) +- [ ] Integration tests: Verify overall feature functionality + +#### Phase Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Phase 3: [Integration] (Estimated commits: X) +**Purpose**: External connections, presentation layer + +#### Tasks +- [ ] Task 1: Specific work content +- [ ] Task 2: Specific work content +- [ ] Quality check +- [ ] Integration tests: Verify component coordination + +#### Phase 
Completion Criteria +- [ ] [Functional completion criteria] +- [ ] [Quality completion criteria] + +### Option C: Hybrid Phase Structure + +Use when implementation approach is Hybrid. Combine vertical and horizontal phases as defined in Design Doc implementation approach. Structure phases per Design Doc specification, ensuring each phase has Tasks, Verification, and Phase Completion Criteria sections matching the format above. + +### Final Phase: Quality Assurance (Required) (Estimated commits: 1) + +This phase is required for ALL implementation approaches. + +**Purpose**: Cross-cutting quality assurance and Design Doc consistency verification + +#### Tasks +- [ ] Verify all Design Doc acceptance criteria achieved +- [ ] Security review: Verify security considerations from Design Doc are implemented +- [ ] Quality checks (types, lint, format) +- [ ] Execute all tests (including integration/E2E from test skeletons, when provided) +- [ ] Coverage 70%+ +- [ ] Document updates + +### Quality Assurance +- [ ] Quality check (staged) +- [ ] All tests pass +- [ ] Static check pass +- [ ] Lint check pass +- [ ] Build success + +## Completion Criteria +- [ ] All phases completed +- [ ] All integration/E2E tests passing (when test skeletons provided) +- [ ] Design Doc acceptance criteria satisfied +- [ ] Staged quality checks completed (zero errors) +- [ ] All tests pass +- [ ] Necessary documentation updated +- [ ] User review approval obtained + +## Progress Tracking +### Phase 1 +- Start: YYYY-MM-DD HH:MM +- Complete: YYYY-MM-DD HH:MM +- Notes: [Any special remarks] + +### Phase 2 +- Start: YYYY-MM-DD HH:MM +- Complete: YYYY-MM-DD HH:MM +- Notes: [Any special remarks] + +## Notes +[Special notes, reference information, important points, etc.] 
diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/prd-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/prd-template.md new file mode 100644 index 0000000..8a670c3 --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/references/prd-template.md @@ -0,0 +1,142 @@ +# PRD: [Feature Name] + +## Overview + +### One-line Summary +[Describe this feature in one line] + +### Background +[Why is this feature needed? What problem does it solve?] + +## User Stories + +### Primary Users +[Define the main target users] + +### User Stories +``` +As a [user type] +I want to [goal/desire] +So that [expected value/benefit] +``` + +### Use Cases +1. [Specific usage scenario 1] +2. [Specific usage scenario 2] +3. [Specific usage scenario 3] + +### User Journey Diagram +```mermaid +journey + title [Feature Name] User Journey + section [Phase 1] + [Step]: [satisfaction score]: [actor] +``` +[Map the end-to-end user experience from trigger event to goal completion] + +### Scope Boundary Diagram +```mermaid +C4Context + Boundary(scope, "In Scope") { + [Components in scope] + } + Boundary(out, "Out of Scope") { + [Components out of scope] + } +``` +[Clarify what is and is not included in this feature] + +## Functional Requirements + +### Must Have (P1 - MVP) +- [ ] Requirement 1: [Detailed description] + - AC-001: [Acceptance criteria - Given/When/Then format or measurable standard] + - AC-002: [Acceptance criteria] +- [ ] Requirement 2: [Detailed description] + - AC-003: [Acceptance criteria] + +### Should Have (P2) +- [ ] Requirement 1: [Detailed description] + - AC-004: [Acceptance criteria] + +### Could Have (P3) +- [ ] Requirement 1: [Detailed description] + +### Won't Have (this release) +- Item 1: [Description and reason for exclusion] +- Item 2: [Description and reason for exclusion] + +## Non-Functional Requirements + +### Performance +- Response Time: [Target value] +- Throughput: [Target value] +- 
Concurrency: [Target value] + +### Reliability +- Availability: [Target value] +- Error Rate: [Target value] + +### Security +- [Security requirements details] + +### Scalability +- [Considerations for future scaling] + +### Accessibility (when feature includes UI) +- Compliance standard: [Default: WCAG 2.1 AA (use organization standard if available)] +- Target assistive technologies: [Screen reader, keyboard operation, voice control, etc.] +- Platform requirements: [e.g., app store review requirements] +- Known constraints: [e.g., external library limitations] + +## Success Criteria + +### Quantitative Metrics +1. [Metric name]: [numeric target] measured by [method] within [timeframe] +2. [Metric name]: [numeric target] measured by [method] within [timeframe] +3. [Metric name]: [numeric target] measured by [method] within [timeframe] + +### Qualitative Metrics +1. [User experience metric 1] +2. [User experience metric 2] + +### UI Quality Metrics (when feature includes UI) +1. [Key operation completion rate / error recovery rate / retry success rate] +2. 
[Accessibility audit target score] + +## Technical Considerations + +### Dependencies +- [Dependencies on existing systems] +- [Dependencies on external services] + +### Constraints +- [Technical constraints] +- [Resource constraints] + +### Assumptions +- [Prerequisite requiring validation 1] +- [Prerequisite requiring validation 2] + +### Risks and Mitigation +| Risk | Impact | Probability | Mitigation | +|------|--------|-------------|------------| +| [Risk 1] | High/Medium/Low | High/Medium/Low | [Countermeasure] | +| [Risk 2] | High/Medium/Low | High/Medium/Low | [Countermeasure] | + +## Undetermined Items + +- [ ] [Question 1]: [Description of options or impacts] +- [ ] [Question 2]: [Description of options or impacts] + +*Discuss with user until this section is empty, then delete after confirmation* + +## Appendix + +### References +- [Related document 1] +- [Related document 2] + +### Glossary +- **Term 1**: [Definition] +- **Term 2**: [Definition] diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/task-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/task-template.md new file mode 100644 index 0000000..6207b79 --- /dev/null +++ b/dev-workflows-frontend/skills/documentation-criteria/references/task-template.md @@ -0,0 +1,54 @@ +# Task: [Task Name] + +Metadata: +- Dependencies: task-01 → Deliverable: docs/plans/analysis/research-results.md +- Provides: docs/plans/analysis/api-spec.md (for research/design tasks) +- Size: Small (1-2 files) + +## Implementation Content +[What this task will achieve] +*Reference dependency deliverables if applicable + +## Target Files +- [ ] [Implementation file path] +- [ ] [Test file path] + +## Investigation Targets +Files to read before starting implementation (file path, with optional search hint): +- [e.g., src/orders/checkout (processOrder function) — determined by task-decomposer based on task nature] + +## Implementation Steps (TDD: Red-Green-Refactor) +### 1. 
Red Phase +- [ ] Read all Investigation Targets and record key observations +- [ ] Review dependency deliverables (if any) +- [ ] Verify/create contract definitions +- [ ] Write failing tests +- [ ] Run tests and confirm failure + +### 2. Green Phase +- [ ] Add minimal implementation to pass tests +- [ ] Run only added tests and confirm they pass + +### 3. Refactor Phase +- [ ] Improve code (maintain passing tests) +- [ ] Confirm added tests still pass + +## Quality Assurance Mechanisms +(From work plan header — mechanisms relevant to this task's target files) +- [Tool/check name] — Enforces: [what] — Config: [path] + +## Operation Verification Methods +(Derived from Verification Strategy in work plan) +- **Verification method**: [What to verify and how — e.g., "compare new implementation output against existing implementation at src/legacy/order_calc", "run endpoint against test database and verify response matches contract"] +- **Success criteria**: [Observable outcome that proves correctness — e.g., "output matches existing implementation for all input combinations", "API returns 200 with expected schema"] +- **Failure response**: [What to do if verification fails — e.g., "reassess approach before proceeding", "escalate to user"] +- **Verification level**: [L1: Functional operation as end-user feature / L2: New tests added and passing / L3: Code builds without errors] + +## Completion Criteria +- [ ] All added tests pass +- [ ] Operation verified per Operation Verification Methods above +- [ ] Deliverables created (for research/design tasks) + +## Notes +- Impact scope: [Areas where changes may propagate] +- Scope boundary: [Files to preserve unchanged — path and reason] diff --git a/dev-workflows-frontend/skills/documentation-criteria/references/ui-spec-template.md b/dev-workflows-frontend/skills/documentation-criteria/references/ui-spec-template.md new file mode 100644 index 0000000..134fafc --- /dev/null +++ 
b/dev-workflows-frontend/skills/documentation-criteria/references/ui-spec-template.md @@ -0,0 +1,199 @@ +# [Feature Name] UI Specification + +## Overview + +[Purpose and scope of this UI Specification in 2-3 sentences] + +### Target PRD +- PRD path: [docs/prd/xxx-prd.md | "N/A — based on requirement-analyzer output"] +- Feature scope: [Which PRD requirements this UI Spec covers | Summary of analyzed requirements] + +### Design Source +| Source | Path | Version | +|--------|------|---------| +| Prototype code | [docs/ui-spec/assets/xxx/] | [commit SHA / tag] | + +## Prototype Management + +Prototype code is an **attachment** to this UI Spec. The canonical specification is always this document + the Design Doc. + +- **Attachment path**: [docs/ui-spec/assets/{feature-name}/] +- **Version identification**: [commit SHA / tag] +- **Compliance premise**: [e.g., design system compliance, component library usage] +- **Relationship to canonical spec**: Differences between prototype and this spec are resolved in favor of this document. Prototype serves as visual/behavioral reference only. + +## AC Traceability (Prototype) + +Map PRD acceptance criteria to prototype references. Skip this section if no prototype is provided. 
+ +| AC ID | AC Summary | Screen / State | Prototype Reference (element ID / path) | Adoption Decision | +|-------|-----------|----------------|----------------------------------------|-------------------| +| AC-001 | [EARS AC summary] | [Screen / state name] | [element or file reference] | Adopted / Not adopted / On hold | + +## Screen List and Transitions + +### Screen List + +| Screen ID | Screen Name | Description | Entry Condition | +|-----------|------------|-------------|-----------------| +| S-01 | [Screen name] | [Purpose] | [How user reaches this screen] | + +### Transition Conditions + +| Source | Destination | Trigger | Guard Condition | +|--------|------------|---------|-----------------| +| S-01 | S-02 | [User action] | [Precondition if any] | + +## Component Decomposition + +### Component Tree + +``` +[Page/Screen] + +-- [Container Component] + | +-- [Presentational Component A] + | +-- [Presentational Component B] + +-- [Container Component] + +-- [Presentational Component C] +``` + +### Component: [ComponentName] + +#### State x Display Matrix + +| State | Default | Loading | Empty | Error | Partial | +|-------|---------|---------|-------|-------|---------| +| Display | [Normal display] | [Specific pattern: e.g., Skeleton of `ExistingComponent` / Spinner from `ui/Spinner`] | [Empty state message + CTA: e.g., "No items yet" + `Button` "Create first item"] | [Error message + recovery: e.g., `Alert` variant="error" + `Button` "Retry"] | [Cached display + `Banner` "Connection lost, showing cached data"] | + +#### Interaction Definition + +| AC ID | EARS Condition | User Action | System Response | State Transition | Error Handling | +|-------|---------------|-------------|-----------------|-----------------|----------------| +| AC-001 | When [trigger] | [Click / input / etc.] 
| [Expected behavior] | [From state -> To state] | [Retry / Reset / Fallback] | + +### Component: [ComponentName2] + +[Repeat State x Display Matrix and Interaction Definition for each component] + +## Design Tokens and Component Map + +### Environment Constraints + +- Target browsers: [e.g., Chrome 120+, Safari 17+] +- Theme support: [e.g., light/dark, system preference] + +#### Responsive Behavior + +| Breakpoint | Width | Key Changes | +|-----------|-------|-------------| +| Mobile | [e.g., < 768px] | [e.g., single column, hamburger nav, 14px body text] | +| Tablet | [e.g., 768px - 1023px] | [e.g., 2-column grid, collapsed sidebar] | +| Desktop | [e.g., ≥ 1024px] | [e.g., full layout, expanded nav, sidebar visible] | + +### Existing Component Reuse Map + +| UI Element | Decision | Existing Component | Notes | +|-----------|----------|-------------------|-------| +| [Button] | Reuse | [components/ui/Button] | [No modifications needed] | +| [DataTable] | Extend | [components/ui/Table] | [Add sorting support] | +| [FeatureCard] | New | - | [No similar component exists] | + +### Design Tokens + +#### Color Roles + +| Role | Token | Value | Usage | +|------|-------|-------|-------| +| Background Surface | [bg-primary] | [e.g., #FFFFFF] | [Page background] | +| Background Surface | [bg-secondary] | [e.g., #F9FAFB] | [Card, section background] | +| Text | [text-primary] | [e.g., #111827] | [Headings, body text] | +| Text | [text-secondary] | [e.g., #6B7280] | [Captions, placeholders] | +| Brand / Accent | [color-brand] | [e.g., #1A73E8] | [Primary actions, links] | +| Status | [color-success] | [e.g., #22C55E] | [Success states, confirmations] | +| Status | [color-error] | [e.g., #EF4444] | [Error states, destructive actions] | +| Border | [border-primary] | [e.g., #E5E7EB] | [Card borders, dividers] | + +#### Typography Hierarchy + +| Role | Font | Size | Weight | Line Height | Letter Spacing | +|------|------|------|--------|-------------|----------------| +| Heading 
1 | [e.g., Inter] | [e.g., 30px] | [e.g., 700] | [e.g., 1.2] | [e.g., -0.02em] | +| Heading 2 | [e.g., Inter] | [e.g., 24px] | [e.g., 600] | [e.g., 1.3] | [e.g., -0.01em] | +| Body | [e.g., Inter] | [e.g., 16px] | [e.g., 400] | [e.g., 1.5] | [e.g., 0] | +| Caption | [e.g., Inter] | [e.g., 12px] | [e.g., 400] | [e.g., 1.4] | [e.g., 0.01em] | +| Monospace | [e.g., JetBrains Mono] | [e.g., 14px] | [e.g., 400] | [e.g., 1.6] | [e.g., 0] | + +#### Spacing Scale + +| Token | Value | Usage | +|-------|-------|-------| +| [spacing-xs] | [e.g., 4px] | [Inline element gaps] | +| [spacing-sm] | [e.g., 8px] | [Compact padding] | +| [spacing-md] | [e.g., 16px] | [Default component padding] | +| [spacing-lg] | [e.g., 24px] | [Section spacing] | +| [spacing-xl] | [e.g., 40px] | [Page section separation] | + +#### Elevation (Depth) + +| Level | Treatment | Usage | +|-------|-----------|-------| +| 0 (Flat) | [e.g., none] | [Inline elements, text] | +| 1 (Raised) | [e.g., 0 1px 2px rgba(0,0,0,0.05)] | [Cards, buttons] | +| 2 (Floating) | [e.g., 0 4px 12px rgba(0,0,0,0.1)] | [Dropdowns, popovers] | +| 3 (Overlay) | [e.g., 0 8px 24px rgba(0,0,0,0.15)] | [Modals, dialogs] | + +#### Border Radius Scale + +| Token | Value | Usage | +|-------|-------|-------| +| [radius-sm] | [e.g., 4px] | [Badges, chips] | +| [radius-md] | [e.g., 8px] | [Cards, inputs] | +| [radius-lg] | [e.g., 12px] | [Modals, panels] | +| [radius-full] | [e.g., 9999px] | [Avatars, pills] | + +## Visual Acceptance + +### Golden States +Define the key visual states that serve as acceptance benchmarks: + +1. **[State name]**: [Description of what should be visually confirmed] +2. 
**[State name]**: [Description] + +### Layout Constraints +- [Min/max width, height constraints] +- [Spacing rules between components] +- [Overflow behavior] + +## Accessibility Requirements + +### Keyboard Navigation + +| Component | Tab Order | Key Binding | Behavior | +|-----------|-----------|-------------|----------| +| [Component] | [Order number] | [Enter / Space / Arrow] | [Expected behavior] | + +### Screen Reader + +| Component | Role | Accessible Name | Live Region | +|-----------|------|-----------------|-------------| +| [Component] | [ARIA role] | [aria-label / aria-labelledby] | [polite / assertive / none] | + +### Contrast Requirements + +| Element | Foreground | Background | Ratio Target | +|---------|-----------|------------|-------------| +| [Text element] | [Color] | [Color] | [4.5:1 for normal text / 3:1 for large text] | + +## Open Items + +| ID | Description | Owner | Deadline | +|----|-------------|-------|----------| +| TBD-01 | [Unresolved question or decision] | [Who resolves] | [Target date] | + +*All TBDs must have an owner and deadline. Resolve before Design Doc creation.* + +## Update History + +| Date | Version | Changes | Author | +|------|---------|---------|--------| +| YYYY-MM-DD | 1.0 | Initial version | [Name] | diff --git a/dev-workflows-frontend/skills/frontend-ai-guide/SKILL.md b/dev-workflows-frontend/skills/frontend-ai-guide/SKILL.md new file mode 100644 index 0000000..8a05065 --- /dev/null +++ b/dev-workflows-frontend/skills/frontend-ai-guide/SKILL.md @@ -0,0 +1,250 @@ +--- +name: frontend-ai-guide +description: Frontend-specific technical decision criteria, anti-patterns, debugging techniques, and quality check workflow. Use when making frontend technical decisions or performing quality assurance. 
+--- + +# AI Developer Guide - Technical Decision Criteria and Anti-pattern Collection (Frontend) + +## Technical Anti-patterns (Red Flag Patterns) + +Immediately stop and reconsider design when detecting the following patterns: + +### Code Quality Anti-patterns +1. **Writing similar code 3 or more times** - Violates Rule of Three +2. **Multiple responsibilities mixed in a single component** - Violates Single Responsibility Principle (SRP) +3. **Defining same content in multiple components** - Violates DRY principle +4. **Making changes without checking dependencies** - Potential for unexpected impacts +5. **Disabling code with comments** - Should use version control +6. **Error suppression** - Hiding problems creates technical debt +7. **Excessive use of type assertions (as)** - Abandoning type safety +8. **Prop drilling through 3+ levels** - Should use Context API or state management +9. **Massive components (300+ lines)** - Split into smaller components + +### Design Anti-patterns +- **"Make it work for now" thinking** - Accumulation of technical debt +- **Patchwork implementation** - Unplanned additions to existing components +- **Optimistic implementation of uncertain technology** - Designing unknown elements assuming "it'll probably work" +- **Symptomatic fixes** - Surface-level fixes that don't solve root causes +- **Unplanned large-scale changes** - Lack of incremental approach + +## Fallback Design Principles + +### Core Principle: Fail-Fast +Design philosophy that prioritizes improving primary code reliability over fallback implementations. 
+
+### Criteria for Fallback Implementation
+- **Fallback rule**: Implement fallbacks only when explicitly defined in Design Doc
+- **Layer Responsibilities**:
+  - Component Layer: Use Error Boundary for error handling
+  - Hook Layer: Implement decisions based on business requirements
+
+### Detection of Excessive Fallbacks
+- Require design review when writing the 3rd catch statement in the same feature
+- Verify Design Doc definition before implementing fallbacks
+- Properly log errors and make failures explicit
+
+## Rule of Three - Criteria for Code Duplication
+
+How to handle duplicate code based on Martin Fowler's "Refactoring":
+
+| Duplication Count | Action | Reason |
+|-------------------|--------|--------|
+| 1st time | Inline implementation | Cannot predict future changes |
+| 2nd time | Consider future consolidation | Pattern beginning to emerge |
+| 3rd time | Implement commonalization | Pattern established |
+
+### Criteria for Commonalization
+
+**Cases for Commonalization**
+- Business logic duplication
+- Complex processing algorithms
+- Component patterns (form fields, cards, etc.)
+- Custom hooks
+- Validation rules
+
+**Cases to Avoid Commonalization**
+- Accidental matches (coincidentally same code)
+- Possibility of evolving in different directions
+- Significant readability decrease from commonalization
+- Simple helpers in test code
+
+### Implementation Example
+```typescript
+// 1st duplication: keep inline implementations (no commonalization yet)
+function UserEmailInput() { /* ... */ }
+function ContactEmailInput() { /* ... */ }
+
+// Commonalize on 3rd occurrence
+function EmailInput({ context }: { context: 'user' | 'contact' | 'admin' }) { /* ... 
*/ } +``` + +## Common Failure Patterns and Avoidance Methods + +### Pattern 1: Error Fix Chain +**Symptom**: Fixing one error causes new errors +**Cause**: Surface-level fixes without understanding root cause +**Avoidance**: Identify root cause with 5 Whys before fixing + +### Pattern 2: Abandoning Type Safety +**Symptom**: Excessive use of any type or as +**Cause**: Impulse to avoid type errors +**Avoidance**: Handle safely with unknown type and type guards + +### Pattern 3: Implementation Without Sufficient Testing +**Symptom**: Many bugs after implementation +**Cause**: Ignoring Red-Green-Refactor process +**Avoidance**: Always start with failing tests + +### Pattern 4: Ignoring Technical Uncertainty +**Symptom**: Frequent unexpected errors when introducing new technology +**Cause**: Assuming "it should work according to official documentation" without prior investigation +**Avoidance**: +- Record certainty evaluation at the beginning of task files + ``` + Certainty: low (Reason: new experimental feature with limited production examples) + Exploratory implementation: true + Fallback: use established patterns + ``` +- For low certainty cases, create minimal verification code first + +### Pattern 5: Insufficient Existing Code Investigation +**Symptom**: Duplicate implementations, architecture inconsistency, integration failures +**Cause**: Insufficient understanding of existing code before implementation +**Avoidance Methods**: +- Before implementation, always search for similar functionality (using domain, responsibility, component patterns as keywords) +- Similar functionality found → Use that implementation (do not create new implementation) +- Similar functionality is technical debt → Create ADR improvement proposal before implementation +- No similar functionality exists → Implement new functionality following existing design philosophy +- Record all decisions and rationale in "Existing Codebase Analysis" section of Design Doc + +## Debugging Techniques + 
+### 1. Error Analysis Procedure +1. Read error message (first line) accurately +2. Focus on first and last of stack trace +3. Identify first line where your code appears +4. Check React DevTools for component hierarchy + +### 2. 5 Whys - Root Cause Analysis +``` +Symptom: Component not rendering +Why1: Props are undefined → Why2: Parent component didn't pass props +Why3: Parent using old prop names → Why4: Component interface was updated +Why5: No update to parent after refactoring +Root cause: Incomplete refactoring, missing call-site updates +``` + +### 3. Minimal Reproduction Code +To isolate problems, attempt reproduction with minimal code: +- Remove unrelated components +- Replace API calls with mocks +- Create minimal configuration that reproduces problem +- Use React DevTools to inspect component tree + +### 4. Debug Log Output +```typescript +console.log('DEBUG:', { + context: 'user-form-submission', + props: { email, name }, + state: currentState, + timestamp: new Date().toISOString() +}) +``` + +## Quality Check Workflow + +Use the appropriate run command based on the `packageManager` field in package.json. 
+ +### Build Commands +- `dev` - Development server +- `build` - Production build +- `preview` - Preview production build +- `type-check` - Type check (no emit) + +### Quality Check Phases + +**Phase 1-3: Basic Checks** +- `check` - Biome (lint + format) +- `build` - TypeScript build + +**Phase 4-5: Tests and Final Confirmation** +- `test` - Test execution +- `test:coverage:fresh` - Coverage measurement (fresh cache) +- `check:all` - Overall integrated check + +### Auxiliary Commands +- `test:coverage` - Run tests with coverage +- `test:safe` - Safe test execution (with auto cleanup) +- `cleanup:processes` - Cleanup Vitest processes +- `format` - Format fixes +- `lint:fix` - Lint fixes +- `open coverage/index.html` - Check coverage report + +### Troubleshooting +- **Port in use error**: Run `cleanup:processes` script +- **Cache issues**: Run `test:coverage:fresh` script +- **Dependency errors**: Clean reinstall dependencies +- **Vite preview not starting**: Check port 4173 availability + +## Situations Requiring Technical Decisions + +### Timing of Abstraction +- Extract patterns after writing concrete implementation 3 times +- Be conscious of YAGNI, implement only currently needed features +- Prioritize current simplicity over future extensibility + +### Performance vs Readability +- Prioritize readability unless React DevTools Profiler identifies a measurable bottleneck (e.g., render time exceeding 16ms, unnecessary re-renders) +- Measure before optimizing with React DevTools Profiler +- Document reason with comments when optimizing + +### Granularity of Component/Type Definitions +- Overly detailed components/types reduce maintainability +- Design components that appropriately express UI patterns +- Use composition over inheritance + +## Implementation Completeness Assurance + +### Required Procedure for Impact Analysis + +**Completion Criteria**: Complete all 3 stages + +#### 1. 
Discovery +```bash +Grep -n "ComponentName\|hookName" -o content +Grep -n "importedFunction" -o content +Grep -n "propsType\|StateType" -o content +``` + +#### 2. Understanding +**Mandatory**: Read all discovered files and include necessary parts in context: +- Caller's purpose and context +- Component hierarchy +- Data flow: Props → State → Event handlers → Callbacks + +#### 3. Identification +Structured impact report (mandatory): +``` +## Impact Analysis +### Direct Impact: ComponentA, ComponentB (with reasons) +### Indirect Impact: FeatureX, PageY (with integration paths) +### Processing Flow: Props → Render → Events → Callbacks +``` + +**Important**: Execute all 3 stages to completion + +### Unused Code Deletion Rule + +When unused code is detected → Will it be used? +- Yes → Implement immediately (no deferral allowed) +- No → Delete immediately (remains in Git history) + +Target: Components, hooks, utilities, documentation, configuration files + +### Existing Code Deletion Decision Flow + +``` +In use? No → Delete immediately (remains in Git history) + Yes → Working? No → Delete + Reimplement + Yes → Fix +``` \ No newline at end of file diff --git a/dev-workflows-frontend/skills/implementation-approach/SKILL.md b/dev-workflows-frontend/skills/implementation-approach/SKILL.md new file mode 100644 index 0000000..7585656 --- /dev/null +++ b/dev-workflows-frontend/skills/implementation-approach/SKILL.md @@ -0,0 +1,144 @@ +--- +name: implementation-approach +description: Implementation strategy selection framework. Use when planning implementation strategy, selecting development approach, or defining verification criteria. +--- + +# Implementation Strategy Selection Framework (Meta-cognitive Approach) + +## Meta-cognitive Strategy Selection Process + +### Phase 1: Comprehensive Current State Analysis + +**Core Question**: "What does the existing implementation look like?" 
+ +#### Analysis Framework +```yaml +Architecture Analysis: Responsibility separation, data flow, dependencies, technical debt +Implementation Quality Assessment: Code quality, test coverage, performance, security +Historical Context Understanding: Current form rationale, past decision validity, constraint changes, requirement evolution +``` + +#### Meta-cognitive Question List +- What is the true responsibility of this implementation? +- Which parts are business essence and which derive from technical constraints? +- What dependencies or implicit preconditions are unclear from the code? +- What benefits and constraints does the current design bring? + +### Phase 2: Strategy Exploration and Creation + +**Core Question**: "When determining before → after, what implementation patterns or strategies should be referenced?" + +#### Strategy Discovery Process +```yaml +Research and Exploration: Tech stack examples (WebSearch), similar projects, OSS references, literature/blogs +Creative Thinking: Strategy combinations, constraint-based design, phase division, extension point design +``` + +#### Reference Strategy Patterns (Creative Combinations Encouraged) + +**Legacy Handling Strategies**: +- Strangler Pattern: Gradual migration through phased replacement +- Facade Pattern: Complexity hiding through unified interface +- Adapter Pattern: Bridge with existing systems + +**New Development Strategies**: +- Feature-driven Development: Vertical implementation prioritizing user value +- Foundation-driven Development: Foundation-first construction prioritizing stability +- Risk-driven Development: Prioritize addressing maximum risk elements + +**Integration/Migration Strategies**: +- Proxy Pattern: Transparent feature extension +- Decorator Pattern: Phased enhancement of existing features +- Bridge Pattern: Flexibility through abstraction + +**Important**: The optimal solution is discovered through creative thinking according to each project's context. 
+ +### Phase 3: Risk Assessment and Control + +**Core Question**: "What risks arise when applying this to existing implementation, and what's the best way to control them?" + +#### Risk Analysis Matrix +```yaml +Technical Risks: System impact, data consistency, performance degradation, integration complexity +Operational Risks: Service availability, deployment downtime, process changes, rollback procedures +Project Risks: Schedule delays, learning costs, quality achievement, team coordination +``` + +#### Risk Control Strategies +```yaml +Preventive Measures: Phased migration, parallel operation verification, integration/regression tests, monitoring setup +Incident Response: Rollback procedures, log/metrics preparation, communication system, service continuation procedures +``` + +### Phase 4: Constraint Compatibility Verification + +**Core Question**: "What are this project's constraints?" + +#### Constraint Checklist +```yaml +Technical Constraints: Library compatibility, resource capacity, mandatory requirements, numerical targets +Temporal Constraints: Deadlines/priorities, dependencies, milestones, learning periods +Resource Constraints: Team/skills, work hours/systems, budget, external contracts +Business Constraints: Market launch timing, customer impact, regulatory compliance +``` + +### Phase 5: Implementation Approach Decision + +Select optimal solution from basic implementation approaches (creative combinations encouraged): + +#### Vertical Slice (Feature-driven) +**Characteristics**: Vertical implementation across all layers by feature unit +**Application Conditions**: Features share fewer than 2 data models, each feature is independently deliverable, changes touch 3+ architecture layers +**Verification Method**: End-user value delivery at each feature completion + +#### Horizontal Slice (Foundation-driven) +**Characteristics**: Phased construction by architecture layer +**Application Conditions**: 3+ features depend on a common foundation layer, 
foundation changes require stability verification before consumers can proceed +**Verification Method**: Integrated operation verification when all foundation layers complete + +#### Hybrid (Creative Combination) +**Characteristics**: Flexible combination according to project characteristics +**Application Conditions**: Unclear requirements, need to change approach per phase, transition from prototyping to full implementation +**Verification Method**: Verify at appropriate L1/L2/L3 levels according to each phase's goals + +### Phase 6: Decision Rationale Documentation + +**Design Doc Documentation**: Record in the Design Doc's implementation approach section: +1. Selected strategy name and characteristics +2. Alternatives considered and reason for rejection +3. Risk mitigation plan (from Phase 3) +4. Constraint compliance summary (from Phase 4) +5. Verification level (L1/L2/L3) and integration point definition + +## Verification Level Definitions + +Priority for completion verification of each task: + +- **L1: Functional Operation Verification** - Operates as end-user feature (e.g., search executable) +- **L2: Test Operation Verification** - New tests added and passing +- **L3: Build Success Verification** - Code builds/runs without errors + +**Priority**: L1 > L2 > L3 in order of verifiability importance + +## Integration Point Definitions + +Define integration points according to selected strategy: +- **Strangler-based**: When switching between old and new systems for each feature +- **Feature-driven**: When users can actually use the feature +- **Foundation-driven**: When all architecture layers are ready and E2E tests pass +- **Hybrid**: When individual goals defined for each phase are achieved + +## Quality Checks + +1. Verify at least one strategy combination beyond listed patterns was considered +2. Confirm Phase 1 analysis framework is complete before selecting strategy +3. Confirm Phase 3 risk analysis matrix is populated before implementation starts +4. 
Confirm Phase 4 constraint checklist is reviewed before strategy decision +5. Confirm Phase 6 documentation template is filled with selection rationale + +## Guidelines for Meta-cognitive Execution + +1. **Leverage Known Patterns**: Use as starting point, explore creative combinations +2. **Active WebSearch Use**: Research implementation examples from similar tech stacks +3. **Apply 5 Whys**: Pursue root causes to grasp essence +4. **Multi-perspective Evaluation**: Comprehensively evaluate from each Phase 1-4 perspective \ No newline at end of file diff --git a/dev-workflows-frontend/skills/integration-e2e-testing/SKILL.md b/dev-workflows-frontend/skills/integration-e2e-testing/SKILL.md new file mode 100644 index 0000000..6ad9889 --- /dev/null +++ b/dev-workflows-frontend/skills/integration-e2e-testing/SKILL.md @@ -0,0 +1,154 @@ +--- +name: integration-e2e-testing +description: Integration and E2E test design principles, ROI calculation, test skeleton specification, and review criteria. Use when designing integration tests, E2E tests, or reviewing test quality. +--- + +# Integration and E2E Testing Principles + +## References + +**E2E test design with Playwright**: See [references/e2e-design.md](references/e2e-design.md) for UI Spec-driven E2E test candidate selection and Playwright test architecture. 
+ +## Test Type Definition and Limits + +| Test Type | Purpose | Scope | Limit per Feature | Implementation Timing | +|-----------|---------|-------|-------------------|----------------------| +| Integration | Verify component interactions | Partial system integration | MAX 3 | Created alongside implementation | +| E2E | Verify critical user journeys | Full system | MAX 1-2 | Executed in final phase only | + +## Behavior-First Principle + +### Include (High ROI) +- Business logic correctness (calculations, state transitions, data transformations) +- Data integrity and persistence behavior +- User-visible functionality completeness +- Error handling behavior (what user sees/experiences) + +### Redirect to Other Test Types +- External service connections → Verify via contract/interface tests +- Performance metrics → Verify via dedicated load testing +- Implementation details → Verify observable behavior instead +- UI layout specifics → Verify information availability instead + +**Principle**: Test = User-observable behavior verifiable in isolated CI environment + +## ROI Calculation + +ROI is used to **rank candidates within the same test type** (integration candidates against each other, E2E candidates against each other). Cross-type comparison is unnecessary because integration and E2E budgets are selected independently. + +``` +ROI Score = Business Value × User Frequency + Legal Requirement × 10 + Defect Detection + (range: 0–120) +``` + +Higher ROI Score = higher priority within its test type. No normalization or capping is applied — the raw score is used directly for ranking. Deduplication is a separate step that removes candidates entirely; it does not modify scores. + +### ROI Threshold for E2E + +E2E tests have high ownership cost (creation, execution, and maintenance are each 3-10× higher than integration tests). To justify creation, an E2E candidate (beyond the must-keep reserved slot) requires **ROI Score > 50**. 
+
+### ROI Calculation Examples
+
+| Scenario | BV | Freq | Legal | Defect | ROI Score | Test Type | Selection Outcome |
+|----------|----|------|-------|--------|-----------|-----------|-------------------|
+| Core checkout flow | 10 | 9 | true | 9 | 109 | E2E | Selected (reserved slot: user-facing multi-step journey) |
+| Payment error handling | 8 | 3 | false | 7 | 31 | E2E | Below threshold (31 < 50), not selected |
+| Profile save flow | 7 | 6 | false | 6 | 48 | E2E | Below threshold (48 < 50), not selected |
+| DB persistence check | 8 | 8 | false | 8 | 72 | Integration | Selected (rank 1 of 3) |
+| Error message display | 5 | 3 | false | 4 | 19 | Integration | Selected (rank 2 of 3) |
+| Optional filter toggle | 3 | 4 | false | 2 | 14 | Integration | Not selected (rank 4 — a rank-3 candidate is omitted from this table; budget of 3 full) |
+
+## Multi-Step User Journey Definition
+
+A feature qualifies as containing a **multi-step user journey** when ALL of the following are true:
+
+1. **2+ distinct interaction boundaries** are traversed in sequence to complete a user goal. What counts as a boundary depends on the system type:
+   - Web: distinct routes/pages
+   - Mobile native: distinct screens/views
+   - CLI: distinct command invocations or interactive prompts
+   - API: distinct API calls forming a transaction (e.g., create → confirm → finalize)
+2. **State carries across steps** — data produced or actions taken in one step affect what the next step accepts or displays
+3.
**The journey has a completion point** — a final state the user or caller reaches (e.g., confirmation page, saved record, API success response, completed workflow) + +### User-Facing vs Service-Internal Journeys + +Multi-step journeys are further classified for E2E budget decisions: + +| Classification | Condition | E2E Reserved Slot | Example | +|---|---|---|---| +| **User-facing** | A human user directly triggers and observes the steps (via UI, CLI, or direct API interaction) | Eligible | Web checkout flow, CLI setup wizard, mobile onboarding | +| **Service-internal** | Steps are triggered by backend services without direct user interaction | Not eligible (use integration tests) | Async job pipeline, service-to-service saga, scheduled batch processing | + +This classification applies only to the reserved E2E slot and the E2E Gap Check. Service-internal journeys are still valid E2E candidates through the normal ROI > 50 path if they warrant full-system verification. + +Use this definition when evaluating E2E test candidates and E2E gap detection. + +## Test Skeleton Specification + +### Required Comment Patterns + +Each test MUST include the following annotations: + +``` +AC: [Original acceptance criteria text] +Behavior: [Trigger] → [Process] → [Observable Result] +@category: core-functionality | integration | edge-case | e2e +@dependency: none | [component names] | full-system +@complexity: low | medium | high +ROI: [score] +``` + +Use the project's comment syntax to wrap these annotations (e.g., `//` for C-family, `#` for Python/Ruby/Shell). 
+ +### Verification Items (Optional) + +When verification points need explicit enumeration: +``` +Verification items: +- [Item 1] +- [Item 2] +``` + +## EARS Format Mapping + +| EARS Keyword | Test Type | Generation Approach | +|--------------|-----------|---------------------| +| **When** | Event-driven | Trigger event → verify outcome | +| **While** | State condition | Setup state → verify behavior | +| **If-then** | Branch coverage | Both condition paths verified | +| (none) | Basic functionality | Direct invocation → verify result | + +## Test File Naming Convention + +- Integration tests: `*.int.test.*` or `*.integration.test.*` +- E2E tests: `*.e2e.test.*` + +The test runner or framework in the project determines the appropriate file extension. + +## Review Criteria + +### Skeleton and Implementation Consistency + +| Check | Failure Condition | +|-------|-------------------| +| Behavior Verification | No assertion for "observable result" in skeleton | +| Verification Item Coverage | Listed items not all covered by assertions | +| Mock Boundary | Internal components mocked in integration test | + +### Implementation Quality + +| Check | Failure Condition | +|-------|-------------------| +| AAA Structure | Arrange/Act/Assert separation unclear | +| Independence | State sharing between tests, order dependency | +| Reproducibility | Date/random dependency, varying results | +| Readability | Test name doesn't match verification content | + +## Quality Standards + +### Required +- Each test verifies one behavior +- Clear AAA (Arrange-Act-Assert) structure +- No test interdependencies +- Deterministic execution + diff --git a/dev-workflows-frontend/skills/integration-e2e-testing/references/e2e-design.md b/dev-workflows-frontend/skills/integration-e2e-testing/references/e2e-design.md new file mode 100644 index 0000000..f4e9e90 --- /dev/null +++ b/dev-workflows-frontend/skills/integration-e2e-testing/references/e2e-design.md @@ -0,0 +1,86 @@ +# E2E Test Design with 
Playwright
+
+## When to Create E2E Tests
+
+E2E tests target **critical user journeys** that span multiple pages or require real browser interaction. Apply the same ROI framework from the parent skill — beyond the must-keep reserved journey slot, only create E2E tests when ROI Score > 50.
+
+### Candidate Sources
+
+| Source | What to Extract |
+|--------|----------------|
+| **Design Doc ACs** | User journeys with EARS "When" keyword spanning multiple screens |
+| **UI Spec Screen Transitions** | Multi-step flows (e.g., form wizard, checkout) |
+| **UI Spec State x Display Matrix** | Error/empty/loading states requiring browser-level verification |
+| **UI Spec Interaction Definitions** | Complex interactions (drag-drop, keyboard navigation, responsive behavior) |
+
+### Selection Criteria
+
+**Include** (high E2E ROI):
+- Multi-page user journeys (login → dashboard → action → confirmation)
+- Flows requiring real browser APIs (navigation, cookies, localStorage)
+- Accessibility verification requiring actual DOM rendering
+- Responsive behavior across viewports
+
+**Use integration tests instead when**:
+- Testing single-component state changes → RTL
+- Testing API response handling → MSW + RTL
+- Testing pure data transformations → unit tests
+
+## UI Spec to E2E Test Mapping
+
+When a UI Spec exists, use it as the primary source for E2E test design:
+
+1. **Extract screen transitions** → Each multi-step transition = 1 E2E candidate
+2. **Check state x display matrix** → Error states requiring navigation = E2E candidate
+3. **Review interaction definitions** → Browser-dependent interactions = E2E candidate
+4.
**Cross-reference with Design Doc ACs** → Ensure E2E candidates map to acceptance criteria + +### Mapping Template + +``` +Screen Transition: [Screen A] → [Screen B] → [Screen C] +AC Reference: AC-{id} +User Journey: [Description of what the user accomplishes] +Preconditions: [Auth state, data state] +Verification Points: + - [What to assert at each step] +E2E ROI Score: [calculated score] +``` + +## Playwright Test Architecture + +### Page Object Pattern + +Organize browser interactions through page objects for maintainability: + +``` +tests/ +├── e2e/ +│ ├── pages/ # Page objects +│ ├── fixtures/ # Test fixtures and helpers +│ └── *.e2e.test.ts # Test files +``` + +### Test Isolation + +- Each test starts from a clean browser context +- No shared state between tests +- Use `beforeEach` for common setup (auth, navigation) +- Prefer `page.goto()` over in-test navigation for setup + +### Viewport Testing + +When UI Spec defines responsive behavior, test critical breakpoints: + +| Breakpoint | Width | When to Test | +|-----------|-------|-------------| +| Mobile | 375px | If UI Spec defines mobile-specific interactions | +| Tablet | 768px | If UI Spec defines tablet layout differences | +| Desktop | 1280px | Default — always test | + +## Budget Enforcement + +Hard limits per feature (same as parent skill): +- **E2E Tests**: MAX 1-2 tests +- Only generate if ROI score > 50 +- Prefer fewer, comprehensive journey tests over many granular tests diff --git a/dev-workflows-frontend/skills/recipe-diagnose/SKILL.md b/dev-workflows-frontend/skills/recipe-diagnose/SKILL.md new file mode 100644 index 0000000..40353c0 --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-diagnose/SKILL.md @@ -0,0 +1,232 @@ +--- +name: recipe-diagnose +description: Investigate problem, verify findings, and derive solutions +disable-model-invocation: true +--- + +**Context**: Diagnosis flow to identify root cause and present solutions + +Target problem: $ARGUMENTS + +## Orchestrator Definition 
+ +**Core Identity**: "I am not a worker. I am an orchestrator." + +**Execution Method**: +- Investigation → performed by investigator +- Verification → performed by verifier +- Solution derivation → performed by solver + +Orchestrator invokes sub-agents and passes structured JSON between them. + +**Task Registration**: Register execution steps using TaskCreate and proceed systematically. Update status using TaskUpdate. + +## Step 0: Problem Structuring (Before investigator invocation) + +### 0.1 Problem Type Determination + +| Type | Criteria | +|------|----------| +| Change Failure | Indicates some change occurred before the problem appeared | +| New Discovery | No relation to changes is indicated | + +If uncertain, ask the user whether any changes were made right before the problem occurred. + +### 0.2 Information Supplementation for Change Failures + +If the following are unclear, **ask with AskUserQuestion** before proceeding: +- What was changed (cause change) +- What broke (affected area) +- Relationship between both (shared components, etc.) + +### 0.3 Problem Essence Understanding + +**Invoke rule-advisor via Agent tool**: +``` +subagent_type: rule-advisor +description: "Problem essence analysis" +prompt: Identify the essence and required rules for this problem: [Problem reported by user] +``` + +Confirm from rule-advisor output: +- `taskAnalysis.mainFocus`: Primary focus of the problem +- `mandatoryChecks.taskEssence`: Root problem beyond surface symptoms +- `selectedRules`: Applicable rule sections +- `warningPatterns`: Patterns to avoid + +### 0.4 Reflecting in investigator Prompt + +**Include the following in investigator prompt**: +1. Problem essence (taskEssence) +2. Key applicable rules summary (from selectedRules) +3. Investigation focus (investigationFocus): Convert warningPatterns to "points prone to confusion or oversight in this investigation" +4. 
**For change failures, additionally include**: + - Detailed analysis of the change content + - Commonalities between cause change and affected area + - Determination of whether the change is a "correct fix" or "new bug" with comparison baseline selection + +## Diagnosis Flow Overview + +``` +Problem → investigator → verifier → solver ─┐ + ↑ │ + └── coverage insufficient ─┘ + (max 2 iterations) + +coverage sufficient → Report +``` + +**Context Separation**: Pass only structured JSON output to each step. Each step starts fresh with the JSON data only. + +## Execution Steps + +Register the following using TaskCreate and execute: + +### Step 1: Investigation (investigator) + +**Agent tool invocation**: +``` +subagent_type: investigator +description: "Investigate problem" +prompt: | + Comprehensively collect information related to the following phenomenon. + + Phenomenon: [Problem reported by user] + + Problem essence: [taskEssence from Step 0.3] + Investigation focus: [investigationFocus from Step 0.4] + + [For change failures, additionally include:] + Change details: [What was changed] + Affected area: [What broke] + Shared components: [Commonalities between cause and effect] +``` + +**Expected output**: pathMap (execution paths per symptom), failurePoints (faults found at each node), impactAnalysis per failure point, unexplored areas, investigation limitations + +### Step 2: Investigation Quality Check + +Review investigation output: + +**Quality Check** (verify JSON output contains the following): +- [ ] `pathMap` exists with at least one symptom, and each symptom has at least one path with nodes listed +- [ ] Each failure point has: `location`, `upstreamDependency`, `symptomExplained`, `causalChain` (reaching a stop condition), `checkStatus`, `evidence` with a `source` citing a specific file or location +- [ ] Each failure point has `comparisonAnalysis` (normalImplementation found or explicitly null) +- [ ] `causeCategory` for each failure point is one of: typo / 
logic_error / missing_constraint / design_gap / external_factor +- [ ] `investigationSources` covers at least 3 distinct source types (code, history, dependency, config, document, external) +- [ ] Investigation covers `investigationFocus` items (when provided in Step 0.4) +- [ ] All nodes on mapped paths have been checked (no path was abandoned after finding the first fault) + +**If quality insufficient**: Re-run investigator specifying missing items explicitly: +``` +prompt: | + Re-investigate with focus on the following gaps: + - Missing: [list specific missing items from quality check] + + Previous investigation results (for context, do not re-investigate covered areas): + [Previous investigation JSON] +``` + +**design_gap Escalation**: + +When investigator output contains `causeCategory: design_gap` or `recurrenceRisk: high`: +1. **Insert user confirmation before verifier execution** +2. Use AskUserQuestion: + "A design-level issue was detected. How should we proceed?" + - A: Attempt fix within current design + - B: Include design reconsideration +3. If user selects B, pass `includeRedesign: true` to solver + +Proceed to verifier once quality is satisfied. + +### Step 3: Verification (verifier) + +**Agent tool invocation**: +``` +subagent_type: verifier +description: "Verify investigation results" +prompt: Verify the following investigation results. 
+ +Investigation results: [Investigation JSON output] +``` + +**Expected output**: Coverage check (missing paths, unchecked nodes), Devil's Advocate evaluation per failure point, failure point evaluation with checkStatus, coverage assessment + +**Coverage Criteria**: +- **sufficient**: Main paths traced, all critical nodes checked, each failure point individually evaluated +- **partial**: Main paths traced, some nodes unchecked or some failure points at blocked/not_reached +- **insufficient**: Significant paths untraced, or critical nodes not investigated + +### Step 4: Solution Derivation (solver) + +**Agent tool invocation**: +``` +subagent_type: solver +description: "Derive solutions" +prompt: Derive solutions based on the following verified failure points. + +Confirmed failure points: [verifier's conclusion.confirmedFailurePoints] +Refuted failure points: [verifier's conclusion.refutedFailurePoints] +Failure point relationships: [verifier's conclusion.failurePointRelationships] +Impact analysis: [investigator's impactAnalysis] +Coverage assessment: [sufficient/partial/insufficient] +``` + +**Expected output**: Multiple solutions (at least 3), tradeoff analysis, recommendation and implementation steps, residual risks + +**Completion condition**: coverageAssessment=sufficient + +**When not reached**: +1. Return to Step 1 with unchecked areas identified by verifier as investigation targets +2. Maximum 2 additional investigation iterations +3. 
After 2 iterations without reaching sufficient, present user with options: + - Continue additional investigation + - Execute solution at current coverage level + +### Step 5: Final Report Creation + +**Prerequisite**: coverageAssessment=sufficient achieved + +After diagnosis completion, report to user in the following format: + +``` +## Diagnosis Result Summary + +### Identified Failure Points +[Confirmed failure points from verification results] +- Per failure point: location, symptom explained, finalStatus + +### Verification Process +- Path coverage: [Paths traced and nodes checked] +- Additional investigation iterations: [0/1/2] +- Coverage assessment: [sufficient/partial/insufficient] + +### Recommended Solution +[Solution derivation recommendation] + +Rationale: [Selection rationale] + +### Implementation Steps +1. [Step 1] +2. [Step 2] +... + +### Alternatives +[Alternative description] + +### Residual Risks +[solver's residualRisks] + +### Post-Resolution Verification Items +- [Verification item 1] +- [Verification item 2] +``` + +## Completion Criteria + +- [ ] Executed investigator and obtained pathMap, failurePoints, and impactAnalysis +- [ ] Performed investigation quality check and re-ran if insufficient +- [ ] Executed verifier and obtained coverage assessment +- [ ] Executed solver +- [ ] Achieved coverageAssessment=sufficient (or obtained user approval after 2 additional iterations) +- [ ] Presented final report to user diff --git a/dev-workflows-frontend/skills/recipe-front-build/SKILL.md b/dev-workflows-frontend/skills/recipe-front-build/SKILL.md new file mode 100644 index 0000000..a14dae0 --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-front-build/SKILL.md @@ -0,0 +1,137 @@ +--- +name: recipe-front-build +description: Execute frontend implementation in autonomous execution mode +disable-model-invocation: true +--- + +## Orchestrator Definition + +**Core Identity**: "I am an orchestrator." 
(see subagents-orchestration-guide skill) + +**Execution Protocol**: +1. **Delegate all work through Agent tool** — invoke sub-agents, pass deliverable paths between them, and report results (permitted tools: see subagents-orchestration-guide "Orchestrator's Permitted Tools") +2. **Follow the 4-step task cycle exactly**: task-executor-frontend → escalation check → quality-fixer-frontend → commit +3. **Enter autonomous mode** when user provides execution instruction with existing task files — this IS the batch approval +4. **Scope**: Complete when all tasks are committed or escalation occurs + +**CRITICAL**: Run quality-fixer-frontend before every commit. + +Work plan: $ARGUMENTS + +## Pre-execution Prerequisites + +### Task File Existence Check +```bash +# Check work plans +! ls -la docs/plans/*.md | grep -v template | tail -5 + +# Check task files +! ls docs/plans/tasks/*.md 2>/dev/null || echo "No task files found" +``` + +### Task Generation Decision Flow + +Analyze task file existence state and determine the action required: + +| State | Criteria | Next Action | +|-------|----------|-------------| +| Tasks exist | .md files in tasks/ directory | User's execution instruction serves as batch approval → Enter autonomous execution immediately | +| No tasks + plan exists | Plan exists but no task files | Confirm with user → run task-decomposer | +| Neither exists + Design Doc exists | No plan or task files, but docs/design/*.md exists | Invoke work-planner to create work plan from Design Doc, then proceed to task decomposition | +| Neither exists | No plan, no task files, no Design Doc | Report missing prerequisites to user and stop | + +## Task Decomposition Phase (Conditional) + +When task files don't exist: + +### 1. User Confirmation +``` +No task files found. +Work plan: docs/plans/[plan-name].md + +Generate tasks from the work plan? (y/n): +``` + +### 2. 
Task Decomposition (if approved) +Invoke task-decomposer using Agent tool: +- `subagent_type`: "dev-workflows-frontend:task-decomposer" +- `description`: "Decompose work plan" +- `prompt`: "Read work plan at docs/plans/[plan-name].md and decompose into atomic tasks. Output: Individual task files in docs/plans/tasks/. Granularity: 1 task = 1 commit = independently executable" + +### 3. Verify Generation +```bash +# Verify generated task files +! ls -la docs/plans/tasks/*.md | head -10 +``` + +**Flow**: Task generation → Autonomous execution (in this order) + +## Pre-execution Checklist + +- [ ] Confirmed task files exist in docs/plans/tasks/ +- [ ] Identified task execution order (dependencies) +- [ ] **Environment check**: Can I execute per-task commit cycle? + - If commit capability unavailable → Escalate before autonomous mode + - Other environments (tests, quality tools) → Subagents will escalate + +## Task Execution Cycle (4-Step Cycle) +**MANDATORY EXECUTION CYCLE**: `task-executor-frontend → escalation check → quality-fixer-frontend → commit` + +For EACH task, YOU MUST: +1. **Register tasks using TaskCreate**: Register work steps. Always include: first "Confirm skill constraints", final "Verify skill fidelity" +2. **Agent tool** (subagent_type: "dev-workflows-frontend:task-executor-frontend") → Pass task file path in prompt, receive structured response +3. **CHECK task-executor-frontend response**: + - `status: "escalation_needed"` or `"blocked"` → STOP and escalate to user + - `requiresTestReview` is `true` → Execute **integration-test-reviewer** + - `needs_revision` → Return to step 2 with `requiredFixes` + - `approved` → Proceed to step 4 + - `readyForQualityCheck: true` → Proceed to step 4 +4. **INVOKE quality-fixer-frontend**: Execute all quality checks and fixes. **Always pass** the current task file path as `task_file` +5. 
**CHECK quality-fixer-frontend response**: + - `stub_detected` → Return to step 2 with `incompleteImplementations[]` details + - `blocked` → STOP and escalate to user + - `approved` → Proceed to step 6 +6. **COMMIT on approval**: Execute git commit + +**CRITICAL**: Parse every sub-agent response for status fields. Execute the matching branch in the 4-step cycle. Proceed to next task only after quality-fixer-frontend returns `approved`. + +## Sub-agent Invocation Constraints + +**MANDATORY suffix for ALL sub-agent prompts**: +``` +[SYSTEM CONSTRAINT] +This agent operates within build skill scope. Use orchestrator-provided rules only. +``` + +Autonomous sub-agents require scope constraints for stable execution. ALWAYS append this constraint to every sub-agent prompt. + +Verify task files exist per Pre-execution Checklist, then enter autonomous execution mode. When requirement changes are detected during execution, escalate to the user with the change summary before continuing. + +## Post-Implementation Verification (After All Tasks Complete) + +After all task cycles finish, run verification agents **in parallel** before the completion report: + +1. **Invoke both in parallel** using Agent tool: + - code-verifier (subagent_type: "dev-workflows-frontend:code-verifier") → `doc_type: design-doc`, Design Doc path, `code_paths`: implementation file list (`git diff --name-only main...HEAD`) + - security-reviewer (subagent_type: "dev-workflows-frontend:security-reviewer") → Design Doc path, implementation file list + +2. **Consolidate results** — check pass/fail for each: + - code-verifier: **pass** when `status` is `consistent` or `mostly_consistent`. **fail** when `needs_review` or `inconsistent`. Collect `discrepancies` with status `drift`, `conflict`, or `gap` + - security-reviewer: **pass** when `status` is `approved` or `approved_with_notes`. **fail** when `needs_revision`. **blocked** → Escalate to user + - Present unified verification report to user + +3. 
**Fix cycle** (when any verifier failed): + - Consolidate all actionable findings into a single task file + - Execute task-executor-frontend with consolidated fixes → quality-fixer-frontend + - Re-run only the failed verifiers (by the criteria in step 2) + - Repeat until all pass or `blocked` → Escalate to user + +4. **All passed** → Proceed to completion report + +## Output Example +Frontend implementation phase completed. +- Task decomposition: Generated under docs/plans/tasks/ +- Implemented tasks: [number] tasks +- Quality checks: All passed (Lighthouse, bundle size, tests) +- Commits: [number] commits created + diff --git a/dev-workflows-frontend/skills/recipe-front-design/SKILL.md b/dev-workflows-frontend/skills/recipe-front-design/SKILL.md new file mode 100644 index 0000000..ab569e0 --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-front-design/SKILL.md @@ -0,0 +1,120 @@ +--- +name: recipe-front-design +description: Execute from requirement analysis to frontend design document creation +disable-model-invocation: true +--- + +**Context**: Dedicated to the frontend design phase. + +## Orchestrator Definition + +**Core Identity**: "I am an orchestrator." (see subagents-orchestration-guide skill) + +**Execution Protocol**: +1. **Delegate all work** to sub-agents — your role is to invoke sub-agents, pass data between them, and report results +2. **Follow subagents-orchestration-guide skill design flow** (this recipe covers medium/large frontend; refer to the guide for scale-specific variations): + - Execute: requirement-analyzer → codebase-analyzer → ui-spec-designer → technical-designer-frontend → code-verifier → document-reviewer → design-sync + - **Stop at every `[Stop: ...]` marker** → Wait for user approval before proceeding +3. **Scope**: Complete when design documents receive approval + +**CRITICAL**: Execute document-reviewer, design-sync, and all stopping points defined in subagents-orchestration-guide skill flows — each serves as a quality gate. 
Skipping any step risks undetected inconsistencies. + +## Workflow Overview + +``` +Requirements → requirement-analyzer → [Stop: Scale determination] + ↓ + codebase-analyzer → ui-spec-designer → [Stop: UI Spec approval] + ↓ + technical-designer-frontend + ↓ + code-verifier → document-reviewer + ↓ + design-sync → [Stop: Design approval] +``` + +## Scope Boundaries + +**Included in this skill**: +- Requirement analysis with requirement-analyzer +- Codebase analysis with codebase-analyzer (before technical design) +- UI Specification creation with ui-spec-designer (prototype code inquiry included) +- ADR creation (if architecture changes, new technology, or data flow changes) +- Design Doc creation with technical-designer-frontend +- Design Doc verification with code-verifier (before document review) +- Document review with document-reviewer +- Design Doc consistency verification with design-sync + +**Responsibility Boundary**: This skill completes with frontend design document (UI Spec/ADR/Design Doc) approval. Work planning and beyond are outside scope. + +Requirements: $ARGUMENTS + +## Execution Flow + +### Step 1: Requirement Analysis Phase +Considering the deep impact on design, first engage in dialogue to understand the background and purpose of requirements: +- What problems do you want to solve? +- Expected outcomes and success criteria +- Relationship with existing systems + +Once the user has answered the three dialogue questions above, execute the process below within design scope. Follow subagents-orchestration-guide Call Examples for codebase-analyzer and code-verifier invocations. 
+ +- Invoke **requirement-analyzer** using Agent tool + - `subagent_type: "dev-workflows-frontend:requirement-analyzer"` + - `description: "Requirement analysis"` + - `prompt: "Requirements: [user requirements] Execute requirement analysis and scale determination"` +- **[STOP]**: Review requirement analysis results and address question items + +### Step 2: UI Specification Phase +After requirement analysis approval, ask the user about prototype code: + +**Ask the user**: "Do you have prototype code for this feature? If so, please provide the path to the code. The prototype will be placed in `docs/ui-spec/assets/` as reference material for the UI Spec." + +- **[STOP]**: Wait for user response about prototype code availability + +Then create the UI Specification: +- Invoke **ui-spec-designer** using Agent tool + - `subagent_type: "dev-workflows-frontend:ui-spec-designer"` + - `description: "UI Spec creation"` + - If PRD exists and prototype provided: `prompt: "Create UI Spec from PRD at [path]. Prototype code is at [user-provided path]. Place prototype in docs/ui-spec/assets/{feature-name}/"` + - If PRD exists and no prototype: `prompt: "Create UI Spec from PRD at [path]. No prototype code available."` + - If no PRD (medium scale): `prompt: "Create UI Spec based on the following requirements: [pass requirement-analyzer output]. No PRD available."` (add prototype path if provided) +- Invoke **document-reviewer** to verify UI Spec + - `subagent_type: "dev-workflows-frontend:document-reviewer"`, `description: "UI Spec review"`, `prompt: "doc_type: UISpec target: [ui-spec path] Review for consistency and completeness"` +- **[STOP]**: Present UI Spec for user approval + +### Step 3: Design Document Creation Phase +First, analyze the existing codebase: +- Invoke **codebase-analyzer** using Agent tool + - `subagent_type: "dev-workflows-frontend:codebase-analyzer"`, `description: "Codebase analysis"`, `prompt: "requirement_analysis: [JSON from Step 1]. 
requirements: [user requirements]. Analyze existing codebase for frontend design guidance."` + +Create appropriate design documents according to scale determination. technical-designer-frontend presents at least two architecture alternatives (technology selection, data flow design) with trade-offs for each: +- Invoke **technical-designer-frontend** using Agent tool + - For ADR: `subagent_type: "dev-workflows-frontend:technical-designer-frontend"`, `description: "ADR creation"`, `prompt: "Create ADR for [technical decision]. Present at least two alternatives with trade-offs."` + - For Design Doc: `subagent_type: "dev-workflows-frontend:technical-designer-frontend"`, `description: "Design Doc creation"`, `prompt: "Create Design Doc based on requirements. Codebase analysis: [JSON from codebase-analyzer]. UI Spec is at [ui-spec path]. Inherit component structure and state design from UI Spec. Present at least two architecture alternatives with trade-offs."` +- **(Design Doc only)** Invoke **code-verifier** to verify Design Doc against existing code. Skip for ADR. + - `subagent_type: "dev-workflows-frontend:code-verifier"`, `description: "Design Doc verification"`, `prompt: "doc_type: design-doc document_path: [Design Doc path] Verify Design Doc against existing code."` +- Invoke **document-reviewer** to verify consistency (pass code-verifier results for Design Doc; omit for ADR) + - `subagent_type: "dev-workflows-frontend:document-reviewer"`, `description: "Document review"`, `prompt: "Review [document path] for consistency and completeness. code_verification: [JSON from code-verifier] (Design Doc only)"` + +### Step 4: Design Consistency Verification +- Invoke **design-sync** using Agent tool + - `subagent_type: "dev-workflows-frontend:design-sync"`, `description: "Design consistency check"`, `prompt: "Check consistency across all Design Docs in docs/design/. 
Report conflicts and overlaps."` +- **[STOP]**: Present design documents and design-sync results, obtain user approval + +## Completion Criteria + +- [ ] Executed requirement-analyzer and determined scale +- [ ] Executed codebase-analyzer and passed results to technical-designer-frontend +- [ ] Created UI Specification with ui-spec-designer (when applicable) +- [ ] Created appropriate design document (ADR or Design Doc) with technical-designer-frontend +- [ ] Executed code-verifier on Design Doc and passed results to document-reviewer (skip for ADR-only) +- [ ] Executed document-reviewer and addressed feedback +- [ ] Executed design-sync for consistency verification +- [ ] Obtained user approval for design document + +## Output Example +Frontend design phase completed. +- UI Specification: docs/ui-spec/[feature-name]-ui-spec.md +- Design document: docs/design/[document-name].md or docs/adr/[document-name].md +- Approval status: User approved diff --git a/dev-workflows-frontend/skills/recipe-front-plan/SKILL.md b/dev-workflows-frontend/skills/recipe-front-plan/SKILL.md new file mode 100644 index 0000000..79358a2 --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-front-plan/SKILL.md @@ -0,0 +1,75 @@ +--- +name: recipe-front-plan +description: Create frontend work plan from design document and obtain plan approval +disable-model-invocation: true +--- + +**Context**: Dedicated to the frontend planning phase. + +## Orchestrator Definition + +**Core Identity**: "I am an orchestrator." (see subagents-orchestration-guide skill) + +**Execution Protocol**: +1. **Delegate all work** to sub-agents — your role is to invoke sub-agents, pass data between them, and report results +2. **Follow subagents-orchestration-guide skill planning flow**: + - Execute steps defined below + - **Stop and obtain approval** for plan content before completion +3. 
**Scope**: See Scope Boundaries below + +**CRITICAL**: When the user requests test generation, always execute acceptance-test-generator first — it provides the test skeleton that work-planner depends on. + +## Scope Boundaries + +**Included in this skill**: +- Design document selection +- Test skeleton generation with acceptance-test-generator +- Work plan creation with work-planner +- Plan approval obtainment + +**Responsibility Boundary**: This skill completes with work plan approval. + +Follow the planning process below: + +## Execution Process + +### Step 1: Design Document Selection + ! ls -la docs/design/*.md | head -10 + - Check for existence of design documents, notify user if none exist + - Present options if multiple exist (can be specified with $ARGUMENTS) + +### Step 2: Test Skeleton Generation Confirmation + - Confirm with user whether to generate test skeletons (integration + E2E) first + - If user wants generation: acceptance-test-generator generates both integration and E2E test skeletons + - Invoke acceptance-test-generator using Agent tool: + - `subagent_type`: "dev-workflows-frontend:acceptance-test-generator" + - `description`: "Test skeleton generation" + - If UI Spec exists: `prompt: "Generate test skeletons from Design Doc at [path]. UI Spec at [ui-spec path]."` + - If no UI Spec: `prompt: "Generate test skeletons from Design Doc at [path]."` + - Pass integration test file path, E2E test file path (or null), and e2eAbsenceReason to work-planner according to subagents-orchestration-guide "acceptance-test-generator → work-planner" section + +### Step 3: Work Plan Creation +Invoke work-planner using Agent tool: +- `subagent_type`: "dev-workflows-frontend:work-planner" +- `description`: "Work plan creation" +- If test skeletons were generated in Step 2: + - When `generatedFiles.e2e` is not null: + `prompt`: "Create work plan from Design Doc at [path]. Integration test file: [integration test path]. E2E test file: [E2E test path]. 
Integration tests are created simultaneously with each phase implementation, E2E tests are executed only in final phase." + - When `generatedFiles.e2e` is null: + `prompt`: "Create work plan from Design Doc at [path]. Integration test file: [integration test path]. No E2E test skeletons were generated (reason: [e2eAbsenceReason]). Integration tests are created simultaneously with each phase implementation." +- If test skeletons were not generated: + `prompt`: "Create work plan from Design Doc at [path]." + +- Follow subagents-orchestration-guide Prompt Construction Rule for additional prompt parameters +- Present work plan to user for review. If user requests changes, re-invoke work-planner with revised parameters +- Highlight steps with unclear scope or external dependencies and ask user to confirm + +## Response at Completion +**Recommended**: End with the following standard response after plan content approval +``` +Frontend planning phase completed. +- Work plan: docs/plans/[plan-name].md +- Status: Approved + +Please provide separate instructions for implementation. +``` diff --git a/dev-workflows-frontend/skills/recipe-front-review/SKILL.md b/dev-workflows-frontend/skills/recipe-front-review/SKILL.md new file mode 100644 index 0000000..77999ac --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-front-review/SKILL.md @@ -0,0 +1,157 @@ +--- +name: recipe-front-review +description: Design Doc compliance and security validation with optional auto-fixes +disable-model-invocation: true +--- + +**Context**: Post-implementation quality assurance for React/TypeScript frontend + +## Orchestrator Definition + +**Core Identity**: "I am an orchestrator." (see subagents-orchestration-guide skill) + +**First Action**: Register Steps 1-11 using TaskCreate before any execution. 
+ +## Execution Method + +- Compliance validation → performed by code-reviewer +- Security validation → performed by security-reviewer +- Fix implementation → performed by task-executor-frontend +- Quality checks → performed by quality-fixer-frontend +- Re-validation → performed by code-reviewer / security-reviewer + +Design Doc (uses most recent if omitted): $ARGUMENTS + +## Execution Flow + +### Step 1: Prerequisite Check +```bash +# Identify Design Doc +ls docs/design/*.md | grep -v template | tail -1 + +# Check implementation files +git diff --name-only main...HEAD +``` + +### Step 2: Execute code-reviewer +Invoke code-reviewer using Agent tool: +- `subagent_type`: "dev-workflows-frontend:code-reviewer" +- `description`: "Code compliance review" +- `prompt`: "Design Doc: [path]. Implementation files: [git diff file list]. Review mode: full. Validate Design Doc compliance and return structured JSON report." + +**Store output as**: `$STEP_2_OUTPUT` + +### Step 3: Execute security-reviewer +Invoke security-reviewer using Agent tool: +- `subagent_type`: "dev-workflows-frontend:security-reviewer" +- `description`: "Security review" +- `prompt`: "Design Doc: [path]. Implementation files: [git diff file list]. Review security compliance." + +**Store output as**: `$STEP_3_OUTPUT` + +### Step 4: Verdict and Response + +**If security-reviewer returned `blocked`**: Stop immediately. Report the blocked finding and escalate to user. Do not proceed to fix steps. 
+ +**Code compliance criteria (considering project stage)**: +- Prototype: Pass at 70%+ +- Production: 90%+ recommended + +**Security criteria**: +- `approved` or `approved_with_notes` → Pass +- `needs_revision` → Fail + +**Report both results independently using subagent output fields only**: + +``` +Code Compliance: [complianceRate from code-reviewer] + Verdict: [verdict from code-reviewer] + Identifier Match Rate: [identifierMatchRate from code-reviewer] + Acceptance Criteria: + - [fulfilled] [item] (confidence: [high/medium/low]) + - [partially_fulfilled] [item]: [gap] — [suggestion] + - [unfulfilled] [item]: [gap] — [suggestion] + Identifier Mismatches: + - [identifier]: DD=[designDocValue] Code=[codeValue] at [location] + Quality Findings: + - [category] [location]: [description] — [rationale] + +Security Review: [status from security-reviewer] + Findings by category: + - [confirmed_risk] [location]: [description] — [rationale] + - [defense_gap] [location]: [description] — [rationale] + - [hardening] [location]: [description] — [rationale] + - [policy] [location]: [description] — [rationale] + Notes: [notes from security-reviewer, if present] + +Execute fixes? (y/n): +``` + +If both pass and user selects `n`: Skip Steps 5-10, proceed to Step 11. + +### Step 5: Execute Skill + +Execute Skill: documentation-criteria (for task file template) + +### Step 6: Create Task File + +Create task file at `docs/plans/tasks/review-fixes-YYYYMMDD.md` +Include both code compliance issues and security requiredFixes. + +### Step 7: Execute Fixes + +Invoke task-executor-frontend using Agent tool: +- `subagent_type`: "dev-workflows-frontend:task-executor-frontend" +- `description`: "Execute review fixes" +- `prompt`: "Task file: docs/plans/tasks/review-fixes-YYYYMMDD.md. Apply staged fixes (stops at 5 files)." 
+ +### Step 8: Quality Check + +Invoke quality-fixer-frontend using Agent tool: +- `subagent_type`: "dev-workflows-frontend:quality-fixer-frontend" +- `description`: "Quality gate check" +- `prompt`: "Confirm quality gate passage for fixed files." + +### Step 9: Re-validate code-reviewer + +Invoke code-reviewer using Agent tool: +- `subagent_type`: "dev-workflows-frontend:code-reviewer" +- `description`: "Re-validate compliance" +- `prompt`: "Re-validate Design Doc compliance after fixes. Design Doc: [path]. Implementation files: [file list]. Prior compliance issues: $STEP_2_OUTPUT. Verify each prior issue is resolved." + +### Step 10: Re-validate security-reviewer + +Invoke security-reviewer using Agent tool (only if security fixes were applied): +- `subagent_type`: "dev-workflows-frontend:security-reviewer" +- `description`: "Re-validate security" +- `prompt`: "Re-validate security after fixes. Prior findings: $STEP_3_OUTPUT. Design Doc: [path]. Implementation files: [file list]." + +### Step 11: Final Report +``` +Code Compliance: + Initial: [X]% + Final: [Y]% (if fixes executed) + +Security Review: + Initial: [status] + Final: [status] (if fixes executed) + Notes: [notes from approved_with_notes, if any] + +Remaining issues: +- [items requiring manual intervention] +``` + +## Auto-fixable Items +- Simple unimplemented acceptance criteria +- Error handling additions +- Contract definition fixes +- Function splitting (length/complexity improvements) +- Security confirmed_risk and defense_gap fixes (input validation, auth checks, output encoding) + +## Non-fixable Items +- Fundamental business logic changes +- Architecture-level modifications +- Design Doc deficiencies +- Committed secrets (blocked → human intervention) + +**Scope**: Design Doc compliance validation, security review, and auto-fixes. 
diff --git a/dev-workflows-frontend/skills/recipe-task/SKILL.md b/dev-workflows-frontend/skills/recipe-task/SKILL.md new file mode 100644 index 0000000..17007fd --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-task/SKILL.md @@ -0,0 +1,58 @@ +--- +name: recipe-task +description: Execute tasks following appropriate rules with rule-advisor metacognition +disable-model-invocation: true +--- + +# Task Execution with Metacognitive Analysis + +Task: $ARGUMENTS + +## Mandatory Execution Process + +**Step 1: Rule Selection via rule-advisor (REQUIRED)** + +Invoke rule-advisor using Agent tool: +- `subagent_type`: "dev-workflows:rule-advisor" +- `description`: "Rule selection" +- `prompt`: "Task: $ARGUMENTS. Select appropriate rules and perform metacognitive analysis." + +**Step 2: Utilize rule-advisor Output** + +After receiving rule-advisor's JSON response, proceed with: + +1. **Understand Task Essence** (from `taskAnalysis.essence`) + - Focus on fundamental purpose, not surface-level work + - Distinguish between "quick fix" vs "proper solution" + +2. **Follow Selected Rules** (from `selectedRules`) + - Review each selected rule section + - Apply concrete procedures and guidelines + +3. **Recognize Past Failures** (from `metaCognitiveGuidance.pastFailures`) + - Apply countermeasures for known failure patterns + - Use suggested alternative approaches + +4. **Execute First Action** (from `metaCognitiveGuidance.firstStep`) + - Start with recommended action + - Use suggested tools first + +**Step 3: Create Task List with TaskCreate** + +Register work steps using TaskCreate. Always include: first "Confirm skill constraints", final "Verify skill fidelity". 
+ +Break down the task based on rule-advisor's guidance: +- Reflect `taskAnalysis.essence` in task descriptions +- Apply `metaCognitiveGuidance.firstStep` to first task +- Restructure tasks considering `warningPatterns` +- Set priorities based on dependency order and warningPatterns severity + +**Step 4: Execute Implementation** + +Proceed with task execution following: +- Start with `metaCognitiveGuidance.firstStep` action from rule-advisor +- Update task structure with TaskUpdate to reflect rule-advisor insights +- Selected rules from rule-advisor +- Task structure (managed via TaskCreate/TaskUpdate) +- Quality standards defined in the selectedRules output from rule-advisor +- Monitor warningPatterns flags throughout execution and adjust approach when triggered diff --git a/dev-workflows-frontend/skills/recipe-update-doc/SKILL.md b/dev-workflows-frontend/skills/recipe-update-doc/SKILL.md new file mode 100644 index 0000000..87608d3 --- /dev/null +++ b/dev-workflows-frontend/skills/recipe-update-doc/SKILL.md @@ -0,0 +1,214 @@ +--- +name: recipe-update-doc +description: Update existing design documents (Design Doc / PRD / ADR) with review +disable-model-invocation: true +--- + +**Context**: Dedicated to updating existing design documents. + +## Orchestrator Definition + +**Core Identity**: "I am an orchestrator." (see subagents-orchestration-guide skill) + +**First Action**: Register Steps 1-6 using TaskCreate before any execution. + +**Execution Protocol**: +1. **Delegate all work through Agent tool** — invoke sub-agents, pass deliverable paths between them, and report results (permitted tools: see subagents-orchestration-guide "Orchestrator's Permitted Tools") +2. **Execute update flow**: + - Identify target → Clarify changes → Update document → Review → Consistency check + - **Stop at every `[Stop: ...]` marker** → Wait for user approval before proceeding +3. 
**Scope**: Complete when updated document receives approval + +**CRITICAL**: Execute document-reviewer and all stopping points — each serves as a quality gate for document accuracy. + +## Workflow Overview + +``` +Target document → [Stop: Confirm changes] + ↓ + technical-designer / technical-designer-frontend / prd-creator (update mode) + ↓ (Design Doc only) + code-verifier → document-reviewer → [Stop: Review approval] + ↓ (Design Doc only) + design-sync → [Stop: Final approval] +``` + +## Scope Boundaries + +**Included in this skill**: +- Existing document identification and selection +- Change content clarification with user +- Document update with appropriate agent (update mode) +- Document review with document-reviewer +- Consistency verification with design-sync (Design Doc only) + +**Out of scope** (redirect to appropriate skills): +- New requirement analysis +- Work planning or implementation + +**Responsibility Boundary**: This skill completes with updated document approval. + +Target document: $ARGUMENTS + +## Execution Flow + +### Step 1: Target Document Identification + +```bash +# Check existing documents +ls docs/design/*.md docs/prd/*.md docs/adr/*.md 2>/dev/null | grep -v template +``` + +**Decision flow**: + +| Situation | Action | +|-----------|--------| +| $ARGUMENTS specifies a path | Use specified document | +| $ARGUMENTS describes a topic | Search documents matching the topic | +| Multiple candidates found | Present options with AskUserQuestion | +| No documents found | Report and end (document creation is out of scope) | + +### Step 2: Document Type and Layer Determination + +Determine type from document path, then determine the layer to select the correct update agent: + +| Path Pattern | Type | Update Agent | Notes | +|-------------|------|--------------|-------| +| `docs/design/*.md` | Design Doc | technical-designer or technical-designer-frontend | See layer detection below | +| `docs/prd/*.md` | PRD | prd-creator | - | +| `docs/adr/*.md` 
| ADR | technical-designer or technical-designer-frontend | See layer detection below | + +**Layer detection** (for Design Doc and ADR): +Read the document and determine its layer from content signals: +- **Frontend** (→ technical-designer-frontend): Document title/scope mentions React, components, UI, frontend; or file contains component hierarchy, state management, UI interactions +- **Backend** (→ technical-designer): All other cases (API, data layer, business logic, infrastructure) + +**ADR Update Guidance**: +- **Minor changes** (clarification, typo fix, small scope adjustment): Update the existing ADR file +- **Major changes** (decision reversal, significant scope change): Create a new ADR that supersedes the original + +### Step 3: Change Content Clarification [Stop] + +Use AskUserQuestion to clarify what changes are needed: +- What sections need updating +- Reason for the change (bug fix findings, spec change, review feedback, etc.) +- Expected outcome after the update + +Confirm understanding of changes with user before proceeding. + +### Step 4: Document Update + +Invoke the update agent determined in Step 2: +``` +subagent_type: [Update Agent from Step 2] +description: "Update [Type from Step 2]" +prompt: | + Operation Mode: update + Existing Document: [path from Step 1] + + ## Changes Required + [Changes clarified in Step 3] + + Update the document to reflect the specified changes. + Add change history entry. +``` + +### Step 5: Document Review [Stop] + +**For Design Doc updates only**: Before document-reviewer, invoke code-verifier: +``` +subagent_type: code-verifier +description: "Verify updated Design Doc" +prompt: | + doc_type: design-doc + document_path: [path from Step 1] + Verify the updated Design Doc against current codebase. + + Verification focus: Pay special attention to literal identifier referential + integrity in the updated sections (paths, endpoints, type names, config keys). 
+``` + +**Store output as**: `$CODE_VERIFICATION_OUTPUT` + +Invoke document-reviewer: +``` +subagent_type: document-reviewer +description: "Review updated document" +prompt: | + Review the following updated document. + + doc_type: [Design Doc / PRD / ADR] + target: [path from Step 1] + mode: standard + code_verification: $CODE_VERIFICATION_OUTPUT (Design Doc only, omit for PRD/ADR) + + Focus on: + - Consistency of updated sections with rest of document + - No contradictions introduced by changes + - Completeness of change history +``` + +**Store output as**: `$STEP_5_OUTPUT` + +**On review result**: +- Approved → Proceed to Step 6 +- Needs revision → Return to Step 4 with the following prompt (max 2 iterations): + ``` + subagent_type: [Update Agent from Step 2] + description: "Revise [Type from Step 2]" + prompt: | + Operation Mode: update + Existing Document: [path from Step 1] + + ## Review Feedback to Address + $STEP_5_OUTPUT + + Address each issue raised in the review feedback. + ``` +- **After 2 rejections** → Flag for human review, present accumulated feedback to user and end + +Present review result to user for approval. + +### Step 6: Consistency Verification (Design Doc only) [Stop] + +**Skip condition**: Document type is PRD or ADR → Proceed to completion. + +For Design Doc, invoke design-sync: +``` +subagent_type: design-sync +description: "Verify consistency" +prompt: | + Verify consistency of the updated Design Doc with other design documents. 
+ + Updated document: [path from Step 1] +``` + +**On consistency result**: +- No conflicts → Present result to user for final approval +- Conflicts detected → Present conflicts to user with AskUserQuestion: + - A: Return to Step 4 to resolve conflicts in this document + - B: End and address conflicts separately + +## Error Handling + +| Error | Action | +|-------|--------| +| Target document not found | Report and end (document creation is out of scope) | +| Sub-agent update fails | Log failure, present error to user, retry once | +| Review rejects after 2 revisions | Stop loop, flag for human intervention | +| design-sync detects conflicts | Present to user for resolution decision | + +## Completion Criteria + +- [ ] Identified target document +- [ ] Clarified change content with user +- [ ] Updated document with appropriate agent (update mode) +- [ ] Executed code-verifier before document-reviewer (Design Doc only) +- [ ] Executed document-reviewer and addressed feedback +- [ ] Executed design-sync for consistency verification (Design Doc only) +- [ ] Obtained user approval for updated document + +## Output Example +Document update completed. +- Updated document: docs/design/[document-name].md +- Approval status: User approved diff --git a/dev-workflows-frontend/skills/subagents-orchestration-guide/SKILL.md b/dev-workflows-frontend/skills/subagents-orchestration-guide/SKILL.md new file mode 100644 index 0000000..966e3fc --- /dev/null +++ b/dev-workflows-frontend/skills/subagents-orchestration-guide/SKILL.md @@ -0,0 +1,419 @@ +--- +name: subagents-orchestration-guide +description: Guides subagent coordination through implementation workflows. Use when orchestrating multiple agents, managing workflow phases, or determining autonomous execution mode. 
+--- + +# Subagents Orchestration Guide + +## Role: The Orchestrator + +**The orchestrator coordinates subagents like a conductor—directing the musicians without playing the instruments.** + +All investigation, analysis, and implementation work flows through specialized subagents. + +### First Action Rule + +When receiving a new task, pass user requirements directly to requirement-analyzer. Determine the workflow based on its scale assessment result. + +### Requirement Change Detection During Flow + +**During flow execution**, monitor user responses for scope-expanding signals: +- Mentions of new features/behaviors (additional operation methods, display on different screens, etc.) +- Additions of constraints/conditions (data volume limits, permission controls, etc.) +- Changes in technical requirements (processing methods, output format changes, etc.) + +**When any signal is detected → Restart from requirement-analyzer with integrated requirements** + +## Available Subagents + +The following subagents are available: + +### Implementation Support Agents +1. **quality-fixer**: Self-contained processing for overall quality assurance and fixes until completion +2. **task-decomposer**: Appropriate task decomposition of work plans +3. **task-executor**: Individual task execution and structured response +4. **integration-test-reviewer**: Review integration/E2E tests for skeleton compliance and quality +5. **security-reviewer**: Security compliance review against Design Doc and coding-principles after all tasks complete + +### Document Creation Agents +6. **requirement-analyzer**: Requirement analysis and work scale determination +7. **codebase-analyzer**: Analyze existing codebase to produce focused guidance for technical design +8. **prd-creator**: Product Requirements Document creation +9. **ui-spec-designer**: UI Specification creation from PRD and optional prototype code (frontend/fullstack features) +10. **technical-designer**: ADR/Design Doc creation +11. 
**work-planner**: Work plan creation from Design Doc and test skeletons +12. **document-reviewer**: Single document quality and rule compliance check +13. **code-verifier**: Verify document-code consistency. Pre-implementation: Design Doc claims against existing codebase. Post-implementation: implementation against Design Doc +14. **design-sync**: Design Doc consistency verification across multiple documents +15. **acceptance-test-generator**: Generate integration and E2E test skeletons from Design Doc ACs + +## Orchestration Principles + +### Delegation Boundary: What vs How + +The orchestrator passes **what to accomplish** and **where to work**. Each specialist determines **how to execute** autonomously. + +**Pass to specialists** (what/where/constraints): +- Target directory, package, or file paths +- Task file path or scope description +- Acceptance criteria and hard constraints from the user or design artifacts + +**Let specialists determine** (how): +- Specific commands to run (specialists discover these from project configuration and repo conventions) +- Execution order and tool flags +- Which files to inspect or modify within the given scope + +| | Bad (orchestrator prescribes how) | Good (orchestrator passes what) | +|---|---|---| +| quality-fixer | "Run these checks: 1. lint 2. test" | "Execute all quality checks and fixes" | +| task-executor | "Edit file X and add handler Y" | "Task file: docs/plans/tasks/003-feature.md" | + +**Decision precedence when outputs conflict**: +1. User instructions (explicit requests or constraints) +2. Task files and design artifacts (Design Doc, PRD, work plan) +3. Objective repo state (git status, file system, project configuration) +4. Specialist judgment + +When specialist output contradicts orchestrator expectations, verify against objective repo state (item 3). If repo state confirms the specialist, follow the specialist. Override specialist output only when it conflicts with items 1 or 2. 
+ +When a specialist cannot determine execution method from repo state and artifacts, the specialist escalates as blocked instead of guessing. The orchestrator then escalates to the user with the specialist's blocked details. + +### Task Assignment with Responsibility Separation + +Assign work based on each subagent's responsibilities: + +**What to delegate to task-executor**: +- Implementation work and test addition +- Confirmation of added tests passing (existing tests are not covered) +- Delegate quality assurance exclusively to quality-fixer (or quality-fixer-frontend for frontend tasks) + +**What to delegate to quality-fixer**: +- Overall quality assurance (static analysis, style check, all test execution, etc.) +- Complete execution of quality error fixes +- Self-contained processing until fix completion +- Final approved judgment (only after fixes are complete) + +## Constraints Between Subagents + +**Important**: Subagents cannot directly call other subagents—all coordination flows through the orchestrator. + +## Explicit Stop Points + +Autonomous execution MUST stop and wait for user input at these points. 
+**Use AskUserQuestion to present confirmations and questions.** + +| Phase | Stop Point | User Action Required | +|-------|------------|---------------------| +| Requirements | After requirement-analyzer completes | Confirm requirements / Answer questions | +| PRD | After document-reviewer completes PRD review | Approve PRD | +| UI Spec | After document-reviewer completes UI Spec review (frontend/fullstack) | Approve UI Spec | +| ADR | After document-reviewer completes ADR review (if ADR created) | Approve ADR | +| Design | After design-sync completes consistency verification | Approve Design Doc | +| Work Plan | After work-planner creates plan | Batch approval for implementation phase | + +**After batch approval**: Autonomous execution proceeds without stops until completion or escalation + +## Scale Determination and Document Requirements +| Scale | File Count | PRD | ADR | Design Doc | Work Plan | +|-------|------------|-----|-----|------------|-----------| +| Small | 1-2 | Update※1 | Not needed | Not needed | Simplified | +| Medium | 3-5 | Update※1 | Conditional※2 | **Required** | **Required** | +| Large | 6+ | **Required**※3 | Conditional※2 | **Required** | **Required** | + +※1: Update if PRD exists for the relevant feature +※2: When there are architecture changes, new technology introduction, or data flow changes +※3: New creation/update existing/reverse PRD (when no existing PRD) + +## How to Call Subagents + +### Execution Method +All subagent invocation uses the **Agent tool** with: +- `subagent_type`: Agent name (e.g., "task-executor") +- `description`: Concise task description (3-5 words) +- `prompt`: Specific instructions including deliverable paths + +### Orchestrator's Permitted Tools + +The orchestrator coordinates work using only the following tools: + +| Tool | Purpose | +|------|---------| +| Agent | Invoke subagents | +| AskUserQuestion | User confirmations and questions | +| TaskCreate / TaskUpdate | Progress tracking | +| Bash | Shell 
operations (git commit, ls, verification commands) | +| Read | Deliverable documents for information bridging between subagents | + +All implementation work (Edit, Write, MultiEdit) is performed by subagents, not the orchestrator. + +### Prompt Construction Rule +Every subagent prompt must include: +1. Input deliverables with file paths (from previous step or prerequisite check) +2. Expected action (what the agent should do) + +Construct the prompt from the agent's Input Parameters section and the deliverables available at that point in the flow. + +### Call Example (requirement-analyzer) +- subagent_type: "requirement-analyzer" +- description: "Requirement analysis" +- prompt: "Requirements: [user requirements]. Context: [any relevant context]. Perform requirement analysis and scale determination." + +### Call Example (codebase-analyzer) +- subagent_type: "codebase-analyzer" +- description: "Codebase analysis" +- prompt: "requirement_analysis: [JSON from requirement-analyzer]. prd_path: [path if exists]. requirements: [original user requirements]. Analyze the existing codebase and produce design guidance." + +### Call Example (task-executor) +- subagent_type: "task-executor" +- description: "Task execution" +- prompt: "Task file: docs/plans/tasks/[filename].md Please complete the implementation" + +## Structured Response Specification + +Subagents respond in JSON format. Key fields for orchestrator decisions: +- **requirement-analyzer**: scale, confidence, affectedLayers, adrRequired, scopeDependencies, questions +- **codebase-analyzer**: analysisScope.categoriesDetected, dataModel.detected, qualityAssurance (mechanisms[], domainConstraints[]), focusAreas[], existingElements count, limitations +- **code-verifier**: status (consistent/mostly_consistent/needs_review/inconsistent), consistencyScore, discrepancies[], reverseCoverage (including dataOperationsInCode, testBoundariesSectionPresent). Pre-implementation: verifies Design Doc claims against existing codebase. 
Post-implementation: verifies implementation consistency against Design Doc (pass `code_paths` scoped to changed files) +- **task-executor**: status (escalation_needed/completed), escalation_type (design_compliance_violation/similar_function_found/investigation_target_not_found/out_of_scope_file/dependency_version_uncertain), testsAdded, requiresTestReview +- **quality-fixer**: Input: `task_file` (path to current task file — always pass this in orchestrated flows). Status: approved/stub_detected/blocked. `stub_detected` → route back to task-executor with `incompleteImplementations[]` details for completion, then re-run quality-fixer. `blocked` → discriminate by `reason` field: `"Cannot determine due to unclear specification"` → read `blockingIssues[]` for specification details; `"Execution prerequisites not met"` → read `missingPrerequisites[]` with `resolutionSteps` — present these to the user as actionable next steps +- **document-reviewer**: approvalReady (true/false) +- **design-sync**: sync_status (synced/conflicts_found) +- **integration-test-reviewer**: status (approved/needs_revision/blocked), requiredFixes +- **security-reviewer**: status (approved/approved_with_notes/needs_revision/blocked), findings, notes, requiredFixes +- **acceptance-test-generator**: status, generatedFiles (integration: path|null, e2e: path|null), budgetUsage, e2eAbsenceReason (null when E2E emitted, otherwise: no_multi_step_journey|below_threshold_user_confirmed) + + +## Handling Requirement Changes + +### Handling Requirement Changes in requirement-analyzer +requirement-analyzer follows the "completely self-contained" principle and processes requirement changes as new input. + +#### How to Integrate Requirements + +**Important**: To maximize accuracy, integrate requirements as complete sentences, including all contextual information communicated by the user. 
+ +```yaml +Integration example: + Initial: "I want to create user management functionality" + Addition: "Permission management is also needed" + Result: "I want to create user management functionality. Permission management is also needed. + + Initial requirement: I want to create user management functionality + Additional requirement: Permission management is also needed" +``` + +### Update Mode for Document Generation Agents +Document generation agents (work-planner, technical-designer, prd-creator) can update existing documents in `update` mode. + +- **Initial creation**: Create new document in create (default) mode +- **On requirement change**: Edit existing document and add history in update mode + +Criteria for timing when to call each agent: +- **work-planner**: Request updates only before execution +- **technical-designer**: Request updates according to design changes → Execute document-reviewer for consistency check +- **prd-creator**: Request updates according to requirement changes → Execute document-reviewer for consistency check +- **document-reviewer**: Always execute before user approval after PRD/ADR/Design Doc creation/update + +## Basic Flow for Work Planning + +Always start with requirement-analyzer, then select the minimum document flow required by scale and affected layers. 
+ +| Scale | Required flow | +|-------|---------------| +| Large | requirement-analyzer → PRD → PRD review → optional UI Spec → optional ADR → codebase-analyzer → Design Doc → code-verifier → document-reviewer → design-sync → acceptance-test-generator → work-planner → task-decomposer | +| Medium | requirement-analyzer → codebase-analyzer → optional UI Spec → optional ADR → Design Doc → code-verifier → document-reviewer → design-sync → acceptance-test-generator → work-planner → task-decomposer | +| Small | requirement-analyzer → work-planner → direct implementation | + +Rules: +- Large scale requires PRD before Design Doc creation +- Frontend/fullstack flows add UI Spec before Design Doc creation +- Fullstack layer sequencing is defined only in `references/monorepo-flow.md` +- `design-sync` is required whenever multiple Design Docs exist +- `task-decomposer` begins only after work-planner batch approval + +## Autonomous Execution Mode + +### Pre-Execution Environment Check + +**Principle**: Verify subagents can complete their responsibilities + +**Required environments**: +- Commit capability (for per-task commit cycle) +- Quality check tools (quality-fixer will detect and escalate if missing) +- Test runner (task-executor will detect and escalate if missing) + +**If critical environment unavailable**: Escalate with specific missing component before entering autonomous mode +**If detectable by subagent**: Proceed (subagent will escalate with detailed context) + +### Authority Delegation + +**After environment check passes**: +- Batch approval for entire implementation phase delegates authority to subagents +- task-executor: Implementation authority (can use Edit/Write) +- quality-fixer: Fix authority (automatic quality error fixes) + +### Definition of Autonomous Execution Mode +After "batch approval for entire implementation phase" with work-planner, autonomously execute the following processes without human approval: + +```mermaid +graph TD + START[Batch approval 
for entire implementation phase] --> AUTO[Start autonomous execution mode] + AUTO --> TD[task-decomposer: Task decomposition] + TD --> LOOP[Task execution loop] + LOOP --> TE[task-executor: Implementation] + TE --> ESCJUDGE{Escalation judgment} + ESCJUDGE -->|escalation_needed/blocked| USERESC[Escalate to user] + ESCJUDGE -->|requiresTestReview: true| ITR[integration-test-reviewer] + ESCJUDGE -->|No issues| QF + ITR -->|needs_revision| TE + ITR -->|approved| QF + QF[quality-fixer: Quality check and fixes] --> QFJUDGE{quality-fixer result} + QFJUDGE -->|stub_detected| TE + QFJUDGE -->|approved| COMMIT[Orchestrator: Execute git commit] + QFJUDGE -->|blocked| USERESC + COMMIT --> CHECK{Any remaining tasks?} + CHECK -->|Yes| LOOP + CHECK -->|No| VERIFY[Post-implementation verification] + VERIFY --> CV[code-verifier: DD consistency check] + VERIFY --> SEC[security-reviewer: Security review] + CV --> VRESULT{Verification results} + SEC --> VRESULT + VRESULT -->|All passed| REPORT[Completion report] + VRESULT -->|Any failed| VFIX[task-executor: Verification fixes] + VFIX --> QF2[quality-fixer: Quality check] + QF2 --> REVERIFY[Re-run failed verifiers only] + REVERIFY --> VRESULT + VRESULT -->|blocked| USERESC + + LOOP --> INTERRUPT{User input?} + INTERRUPT -->|None| TE + INTERRUPT -->|Yes| REQCHECK{Requirement change check} + REQCHECK -->|No change| TE + REQCHECK -->|Change| STOP[Stop autonomous execution] + STOP --> RA[Re-analyze with requirement-analyzer] +``` + +### Post-Implementation Verification Pass/Fail Criteria + +| Verifier | Pass | Fail | Blocked | +|----------|------|------|---------| +| code-verifier | `status` is `consistent` or `mostly_consistent` | `status` is `needs_review` or `inconsistent` | — | +| security-reviewer | `status` is `approved` or `approved_with_notes` | `status` is `needs_revision` | `status` is `blocked` → Escalate to user | + +**Re-run rule**: After fix cycle, re-run only verifiers that returned **fail**. 
Verifiers that passed on the previous run are not re-run.
+
+### Conditions for Stopping Autonomous Execution
+Stop autonomous execution and escalate to user in the following cases:
+
+1. **Escalation from subagent**
+   - When receiving response with `status: "escalation_needed"`
+   - When receiving response with `status: "blocked"`
+
+2. **When requirement change detected**
+   - Any match in requirement change detection checklist
+   - Stop autonomous execution and re-analyze with integrated requirements in requirement-analyzer
+
+3. **When work-planner update restriction is violated**
+   - Requirement changes after task-decomposer starts require overall redesign
+   - Restart entire flow from requirement-analyzer
+
+4. **When user explicitly stops**
+   - Direct stop instruction or interruption
+
+### Task Management: 4-Step Cycle
+
+**Per-task cycle**:
+1. **Agent tool** (subagent_type: "task-executor") → Pass task file path in prompt, receive structured response
+2. Check task-executor response:
+   - `status: escalation_needed` or `blocked` → Escalate to user
+   - `requiresTestReview` is `true` → Execute **integration-test-reviewer**, then check its result:
+     - `needs_revision` → Return to step 1 with `requiredFixes`
+     - `approved` → Proceed to step 3
+   - Otherwise → Proceed to step 3
+3. quality-fixer → Quality check and fixes. **Always pass** the current task file path as `task_file`
+   - `stub_detected` → Return to step 1 with `incompleteImplementations[]` details
+   - `blocked` → Escalate to user
+   - `approved` → Proceed to step 4
+4. git commit → Execute with Bash (on `approved`)
+
+### Progress Tracking
+
+Register overall phases using TaskCreate. Update each phase with TaskUpdate as it completes.
+
+## Main Orchestrator Roles
+
+1. **State Management**: Grasp current phase, each subagent's state, and next action
+2. 
**Information Bridging**: Data conversion and transmission between subagents + - Convert each subagent's output to next subagent's input format + - **Always pass deliverables from previous process to next agent** + - Extract necessary information from structured responses + - Compose commit messages from changeSummary + - Explicitly integrate initial and additional requirements when requirements change + + ### Handoff Contracts + + #### HC-01: requirement-analyzer → codebase-analyzer + - Pass: `requirement_analysis`, `prd_path` (if exists), original user requirements + + #### HC-02: codebase-analyzer → technical-designer + - Pass: full codebase-analyzer JSON as additional context + - Required downstream uses: + - `focusAreas` → canonical disposition-target list for the Fact Disposition Table + - `dataModel`, `dataTransformationPipelines`, `qualityAssurance` → Existing Codebase Analysis / Verification Strategy / Quality Assurance sections + + #### HC-03: technical-designer → code-verifier + - Pass: Design Doc path (`doc_type: design-doc`) + - Do not pass `code_paths`; code-verifier discovers scope from the document + + #### HC-04: code-verifier + codebase-analyzer → document-reviewer + - Pass: `code_verification` JSON and the same `codebase_analysis` JSON previously given to the designer + - Purpose: reviewer validates both discrepancy integration and Fact Disposition coverage against `focusAreas` + + #### HC-05: code-verifier → next-layer technical-designer (fullstack only) + - Defined only for multi-layer fullstack flow in `references/monorepo-flow.md` + - Pass: prior-layer Design Doc path plus `prior_layer_verification` + - Use only `discrepancies[]` as known issues to address or escalate. Do not infer verified claims that are not explicitly present in the verifier output. + + #### technical-designer → work-planner + + **Pass to work-planner**: Design Doc path. 
Work-planner reads the DD template from documentation-criteria skill, scans all DD sections, and extracts technical requirements in these categories: + - **Verification Strategy**: Extracted to work plan header (Correctness Proof Method + Early Verification Point) + - **Implementation targets**: Components, functions, or data structures to create or modify + - **Connection/switching/registration**: Integration points, dependency wiring, switching methods + - **Contract changes and propagation**: Interface changes, data contracts, field propagation across boundaries + - **Verification requirements**: Verification methods, test boundaries, integration verification points + - **Prerequisite work**: Migration steps, security measures, environment setup + + Work-planner produces a Design-to-Plan Traceability table mapping each extracted item to covering task(s). Items without a covering task must be marked as `gap` with justification. Unjustified gaps are errors. Justified gaps require user confirmation before plan approval. + + #### HC-06: acceptance-test-generator → work-planner + + **Pass to acceptance-test-generator**: + - Design Doc: [path] + - UI Spec: [path] (if exists) + + **Orchestrator verification items**: + - Verify `generatedFiles.integration` is a valid path (when not null) and the file exists + - Verify `generatedFiles.e2e` is a valid path (when not null) and the file exists + - When `generatedFiles.e2e` is null, verify `e2eAbsenceReason` is present — this is intentional absence, not an error + + **Pass to work-planner**: + - Integration test file: [path] (create and execute simultaneously with each phase implementation) + - E2E test file: [path] or null (execute only in final phase, when provided) + - E2E absence reason: [reason] (when E2E is null — pass this so work-planner can skip E2E Gap Check for intentional absence) + + **On error**: Escalate to user if integration file generation failed unexpectedly (status != completed). 
E2E being null with a valid absence reason is not an error. + +3. **ADR Status Management**: Update ADR status after user decision (Accepted/Rejected) + +## Important Constraints + +- **Quality check is mandatory**: quality-fixer approval needed before commit +- **Structured response mandatory**: Information transmission between subagents in JSON format +- **Approval management**: Document creation → Execute document-reviewer → Get user approval before proceeding +- **Flow confirmation**: After getting approval, always check next step with work planning flow (large/medium/small scale) +- **Consistency verification**: Resolve subagent conflicts per Decision precedence (see Delegation Boundary section) + +## References + +- `references/monorepo-flow.md`: Fullstack (monorepo) orchestration flow diff --git a/dev-workflows-frontend/skills/subagents-orchestration-guide/references/monorepo-flow.md b/dev-workflows-frontend/skills/subagents-orchestration-guide/references/monorepo-flow.md new file mode 100644 index 0000000..4304e07 --- /dev/null +++ b/dev-workflows-frontend/skills/subagents-orchestration-guide/references/monorepo-flow.md @@ -0,0 +1,139 @@ +# Fullstack (Monorepo) Flow + +This reference defines the orchestration flow for projects spanning multiple layers (backend + frontend). It extends the standard orchestration guide without modifying it. 
+ +## When This Flow Applies + +- Multiple Design Docs exist targeting different layers (backend, frontend) +- A single feature requires implementation across both backend and frontend +- The orchestrator is invoked via `fullstack-implement` or `fullstack-build` commands + +## Design Phase + +### Large Scale Fullstack (6+ Files) - 15 Steps + +| Step | Agent | Purpose | Output | +|------|-------|---------|--------| +| 1 | requirement-analyzer | Requirement analysis + scale determination **[Stop]** | Requirements + scale | +| 2 | prd-creator | PRD covering entire feature (all layers) | Single PRD | +| 3 | document-reviewer | PRD review **[Stop]** | Approval | +| 4 | (orchestrator) | Ask user for prototype code **[Stop]** | Prototype path or none | +| 5 | ui-spec-designer | UI Spec from PRD + optional prototype | UI Spec | +| 6 | document-reviewer | UI Spec review **[Stop]** | Approval | +| 7 | codebase-analyzer ×2 | Codebase analysis per layer (pass req-analyzer output + PRD path, filtered to layer) | Codebase guidance per layer | +| 8 | technical-designer | **Backend** Design Doc (with backend codebase-analyzer context) | Backend Design Doc | +| 9 | code-verifier | Verify **Backend** Design Doc against existing code (its result JSON is passed to step 10 as `prior_layer_verification`) | Backend verification | +| 10 | technical-designer-frontend | **Frontend** Design Doc (with frontend codebase-analyzer context + backend Design Doc + `prior_layer_verification` from step 9 + UI Spec) | Frontend Design Doc | +| 11 | code-verifier | Verify **Frontend** Design Doc against existing code | Frontend verification | +| 12 | document-reviewer ×2 | Review each Design Doc (with code-verifier results as `code_verification`) | Reviews | +| 13 | design-sync | Cross-layer consistency verification (source: frontend Design Doc) **[Stop]** | Sync status | +| 14 | acceptance-test-generator | Integration/E2E test skeleton from cross-layer contracts | Test skeletons | +| 15 | work-planner 
| Work plan from all Design Docs **[Stop: Batch approval]** | Work plan | + +### Medium Scale Fullstack (3-5 Files) - 13 Steps + +| Step | Agent | Purpose | Output | +|------|-------|---------|--------| +| 1 | requirement-analyzer | Requirement analysis + scale determination **[Stop]** | Requirements + scale | +| 2 | codebase-analyzer ×2 | Codebase analysis per layer (pass req-analyzer output, filtered to layer) | Codebase guidance per layer | +| 3 | (orchestrator) | Ask user for prototype code **[Stop]** | Prototype path or none | +| 4 | ui-spec-designer | UI Spec from requirements + optional prototype | UI Spec | +| 5 | document-reviewer | UI Spec review **[Stop]** | Approval | +| 6 | technical-designer | **Backend** Design Doc (with backend codebase-analyzer context) | Backend Design Doc | +| 7 | code-verifier | Verify **Backend** Design Doc against existing code (its result JSON is passed to step 8 as `prior_layer_verification`) | Backend verification | +| 8 | technical-designer-frontend | **Frontend** Design Doc (with frontend codebase-analyzer context + backend Design Doc + `prior_layer_verification` from step 7 + UI Spec) | Frontend Design Doc | +| 9 | code-verifier | Verify **Frontend** Design Doc against existing code | Frontend verification | +| 10 | document-reviewer ×2 | Review each Design Doc (with code-verifier results as `code_verification`) | Reviews | +| 11 | design-sync | Cross-layer consistency verification (source: frontend Design Doc) **[Stop]** | Sync status | +| 12 | acceptance-test-generator | Integration/E2E test skeleton from cross-layer contracts | Test skeletons | +| 13 | work-planner | Work plan from all Design Docs **[Stop: Batch approval]** | Work plan | + +### Parallelization in Multi-Agent Steps + +Steps marked with ×2 (codebase-analyzer ×2, document-reviewer ×2) invoke the agent once per layer. These invocations are independent and can run in parallel when the orchestrator supports concurrent Agent tool calls. 
The two code-verifier invocations run sequentially: backend verification completes before frontend authoring begins so the frontend designer references verified backend contracts. + +### Layer Context in Design Doc Creation + +Use the common handoff contracts in `../SKILL.md`: +- Backend DD creation uses `HC-01` + `HC-02` +- Frontend DD creation uses `HC-01` + `HC-02` + `HC-05` +- Design Doc review uses `HC-03` + `HC-04` + +Prompt templates: + +**Backend Design Doc** +```text +Create a backend Design Doc from [PRD path or requirement_analysis]. +Codebase analysis: [JSON from codebase-analyzer for backend layer] +Focus on: API contracts, data layer, business logic, service architecture. +``` + +**Frontend Design Doc** +```text +Create a frontend Design Doc from [PRD path or requirement_analysis]. +Codebase analysis: [JSON from codebase-analyzer for frontend layer] +Backend Design Doc: [path] +prior_layer_verification: [JSON from code-verifier on backend Design Doc] +Reference UI Spec at [path] for component structure and state design. +Use `prior_layer_verification.discrepancies[]` as known issues to address or escalate. Do not infer verified claims beyond what the verifier output states explicitly. +Focus on: component hierarchy, state management, UI interactions, data fetching. +``` + +### design-sync for Cross-Layer Verification + +Call design-sync with `source_design` = frontend Design Doc (created last, referencing backend's Integration Points). design-sync auto-discovers other Design Docs in `docs/design/` for comparison. 
+
+## Test Skeleton Generation Phase
+
+Orchestrator passes all Design Docs and UI Spec to acceptance-test-generator:
+
+```
+Generate test skeletons from the following documents:
+- Design Doc (backend): [path]
+- Design Doc (frontend): [path]
+- UI Spec: [path] (if exists)
+```
+
+## Work Planning Phase
+
+Orchestrator passes all Design Docs to work-planner:
+
+```
+Create a work plan from the following documents:
+- PRD: [path] (Large Scale only)
+- Design Doc (backend): [path]
+- Design Doc (frontend): [path]
+
+Compose phases as vertical feature slices where possible — each phase should contain
+both backend and frontend work for the same feature area, enabling early integration
+verification per phase.
+```
+
+work-planner's existing Integration Complete criteria naturally cover cross-layer verification when given multiple Design Docs.
+
+## Task Decomposition Phase
+
+task-decomposer follows standard decomposition from the work plan. The key addition is the **layer-aware naming convention**:
+
+| Filename Pattern | Meaning | Executor | Quality Fixer |
+|-----------------|---------|----------|---------------|
+| `{plan}-backend-task-{n}.md` | Backend only | task-executor | quality-fixer |
+| `{plan}-frontend-task-{n}.md` | Frontend only | task-executor-frontend | quality-fixer-frontend |
+
+Layer is determined from the task's **Target files** paths — this is a factual determination, not inference.
+
+## Task Cycle
+
+Each task follows the standard 4-step cycle from `../SKILL.md`. 
Only agent routing varies by layer: + +| Task pattern | Executor | Quality fixer | +|-------------|----------|---------------| +| `*-backend-task-*` | `task-executor` | `quality-fixer` | +| `*-frontend-task-*` | `task-executor-frontend` | `quality-fixer-frontend` | + +### integration-test-reviewer Placement + +When `requiresTestReview` is `true`: +- Standard flow (integration-test-reviewer after task-executor, before quality-fixer) + +All other orchestration rules follow the standard subagents-orchestration-guide. diff --git a/dev-workflows-frontend/skills/task-analyzer/SKILL.md b/dev-workflows-frontend/skills/task-analyzer/SKILL.md new file mode 100644 index 0000000..82c3e14 --- /dev/null +++ b/dev-workflows-frontend/skills/task-analyzer/SKILL.md @@ -0,0 +1,128 @@ +--- +name: task-analyzer +description: Performs metacognitive task analysis and skill selection. Use when determining task complexity, selecting appropriate skills, or estimating work scale. +--- + +# Task Analyzer + +Provides metacognitive task analysis and skill selection guidance. + +## Skills Index + +See **[skills-index.yaml](references/skills-index.yaml)** for available skills metadata. + +## Task Analysis Process + +### 1. Understand Task Essence + +Identify the fundamental purpose beyond surface-level work: + +| Surface Work | Fundamental Purpose | +|--------------|---------------------| +| "Fix this bug" | Problem solving, root cause analysis | +| "Implement this feature" | Feature addition, value delivery | +| "Refactor this code" | Quality improvement, maintainability | +| "Update this file" | Change management, consistency | + +**Action**: Map the user request to one row in the Surface Work → Fundamental Purpose table above. If no row matches, state the fundamental purpose explicitly before proceeding. + +### 2. 
Estimate Task Scale + +| Scale | File Count | Indicators | +|-------|------------|------------| +| Small | 1-2 | Single function/component change | +| Medium | 3-5 | Multiple related components | +| Large | 6+ | Cross-cutting concerns, architecture impact | + +**Scale affects skill priority:** +- Scale >= Large → include documentation-criteria and implementation-approach in selectedSkills with priority high +- Scale = Small → limit selectedSkills to task-type essential skills only (max 3) + +### 3. Identify Task Type + +| Type | Characteristics | Key Skills | +|------|-----------------|------------| +| Implementation | New code, features | coding-principles, testing-principles | +| Fix | Bug resolution | ai-development-guide, testing-principles | +| Refactoring | Structure improvement | coding-principles, ai-development-guide | +| Design | Architecture decisions | documentation-criteria, implementation-approach | +| Quality | Testing, review | testing-principles, integration-e2e-testing | + +### 4. Tag-Based Skill Matching + +Extract relevant tags from task description and match against skills-index.yaml: + +```yaml +Task: "Implement user authentication with tests" +Extracted tags: [implementation, testing, security] +Matched skills: + - coding-principles (implementation, security) + - testing-principles (testing) + - ai-development-guide (implementation) +``` + +### 5. Implicit Relationships + +Consider hidden dependencies: + +| Task Involves | Also Include | +|---------------|--------------| +| Error handling | debugging, testing | +| New features | design, implementation, documentation | +| Performance | profiling, optimization, testing | +| Frontend | typescript-rules, test-implement | +| API/Integration | integration-e2e-testing | + +## Output Format + +Return structured analysis with skill metadata from skills-index.yaml: + +```yaml +taskAnalysis: + essence: # Fundamental purpose identified + type: + scale: + estimatedFiles: + tags: [, ...] 
# Extracted from task description + +selectedSkills: + - skill: # From skills-index.yaml + priority: + reason: # Why this skill was selected + # Pass through metadata from skills-index.yaml + tags: [...] + typical-use: + size: + sections: [...] # All sections from yaml, unfiltered +``` + +**Note**: Section selection (choosing which sections are relevant) is done after reading the actual SKILL.md files. + +## Skill Selection Priority + +1. **Essential** - Directly related to task type +2. **Quality** - Testing and quality assurance +3. **Process** - Workflow and documentation +4. **Supplementary** - Reference and best practices + +## Metacognitive Question Design + +Generate 3-5 questions according to task nature: + +| Task Type | Question Focus | +|-----------|----------------| +| Implementation | Design validity, edge cases, performance | +| Fix | Root cause (5 Whys), impact scope, regression testing | +| Refactoring | Current problems, target state, phased plan | +| Design | Requirement clarity, future extensibility, trade-offs | + +## Warning Patterns + +Detect and flag these patterns: + +| Pattern | Warning | Mitigation | +|---------|---------|------------| +| Large change detected | Pair with implementation-approach | Split into phases per strategy | +| Implementation task detected | Pair with testing-principles | Apply TDD from start | +| Error fix requested | Pair with ai-development-guide | Apply 5 Whys before fixing | +| Multi-file task without plan | Pair with documentation-criteria | Create work plan first | \ No newline at end of file diff --git a/dev-workflows-frontend/skills/task-analyzer/references/skills-index.yaml b/dev-workflows-frontend/skills/task-analyzer/references/skills-index.yaml new file mode 100644 index 0000000..dcc46bd --- /dev/null +++ b/dev-workflows-frontend/skills/task-analyzer/references/skills-index.yaml @@ -0,0 +1,216 @@ +# Skills Metadata Index +# Used to select appropriate skills based on task analysis + +skills: + 
coding-principles: + skill: "coding-principles" + tags: [implementation, code-quality, refactoring, clean-code, maintainability, function-design, error-handling, parameterized-dependencies, performance, security] + typical-use: "Language-agnostic code creation, modification, and refactoring principles applicable to all programming languages" + size: medium + key-references: + - "YAGNI Principle - Kent Beck" + - "Clean Code - Robert C. Martin" + - "DRY Principle - The Pragmatic Programmer" + - "Refactoring - Martin Fowler" + - "Single Responsibility Principle - SOLID" + sections: + - "Core Philosophy" + - "Code Quality" + - "Function Design" + - "Error Handling" + - "Dependency Management" + - "Performance Considerations" + - "Code Organization" + - "Commenting Principles" + - "Refactoring Approach" + - "Testing Considerations" + - "Security Principles (Secure Defaults, Input and Output Boundaries, Access Control, Knowledge Cutoff Supplement)" + - "Documentation" + - "Version Control Practices" + - "Language-Specific Adaptations" + + testing-principles: + skill: "testing-principles" + tags: [testing, tdd, quality, unit-testing, integration-testing, e2e-testing, test-design, coverage, mocking, test-independence, ci-cd, test-quality-criteria] + typical-use: "Universal testing principles, TDD practice, test quality criteria, test creation and quality assurance for all programming languages" + size: large + key-references: + - "Test-Driven Development - Kent Beck" + - "Red-Green-Refactor Cycle - Kent Beck" + - "AAA Pattern - Arrange-Act-Assert" + - "Test Pyramid - Mike Cohn" + sections: + - "Core Testing Philosophy" + - "Test-Driven Development (TDD)" + - "Quality Requirements" + - "Test Types" + - "Test Design Principles" + - "Test Independence" + - "Mocking and Test Doubles" + - "Data Layer Testing" + - "Test Quality Practices" + - "What to Test" + - "Test Quality Criteria" + - "Verification Requirements" + - "Test Organization" + - "Performance Considerations" + - 
"Continuous Integration" + - "Common Anti-Patterns to Avoid" + - "Regression Testing" + - "Testing Best Practices by Language Paradigm" + - "Documentation and Communication" + + ai-development-guide: + skill: "ai-development-guide" + tags: [anti-patterns, technical-judgment, debugging, quality-commands, rule-of-three, implementation, refactoring, code-reading, best-practices, fail-fast, error-handling, impact-analysis] + typical-use: "Technical decision criteria, anti-pattern detection, debugging techniques, quality check workflows, impact analysis procedures" + size: large + key-references: + - "Rule of Three - Martin Fowler" + - "5 Whys - Toyota Production System" + - "DRY Principle - The Pragmatic Programmer" + - "YAGNI Principle - Extreme Programming" + sections: + - "Technical Anti-patterns (Red Flag Patterns)" + - "Fail-Fast Fallback Design Principles" + - "Rule of Three - Criteria for Code Duplication" + - "Common Failure Patterns and Avoidance Methods" + - "Debugging Techniques" + - "Quality Check Workflow" + - "Situations Requiring Technical Decisions" + - "Implementation Completeness Assurance" + - "Impact Analysis" + + documentation-criteria: + skill: "documentation-criteria" + tags: [documentation, decision-making, adr, prd, design-doc, planning, process, scale-assessment] + typical-use: "Scale assessment at implementation start, document creation criteria, ADR/PRD/Design Doc/Work Plan creation standards" + size: medium + key-references: + - "ADR Method - Michael Nygard" + - "Design Doc Culture - Google Engineering Practices" + - "Single Source of Truth" + sections: + - "Templates" + - "Creation Decision Matrix" + - "ADR Creation Conditions (Required if Any Apply)" + - "Detailed Document Definitions" + - "Creation Process" + - "Storage Locations" + - "ADR Status" + - "AI Automation Rules" + - "Diagram Requirements" + - "Common ADR Relationships" + + implementation-approach: + skill: "implementation-approach" + tags: [architecture, implementation, 
task-decomposition, strategy-patterns, strangler-pattern, facade-pattern, design, planning, verification-levels] + typical-use: "Implementation strategy selection, task decomposition, design decisions, large-scale change planning" + size: medium + key-references: + - "Strangler Fig Pattern - Martin Fowler" + - "Feature Slicing - Martin Fowler" + - "Walking Skeleton - Alistair Cockburn" + sections: + - "Meta-cognitive Strategy Selection Process" + - "Verification Level Definitions" + - "Integration Point Definitions" + - "Anti-patterns" + - "Guidelines for Meta-cognitive Execution" + + integration-e2e-testing: + skill: "integration-e2e-testing" + tags: [testing, integration-testing, e2e-testing, test-design, behavior-first, roi, test-skeleton, ears-format] + typical-use: "Integration and E2E test design principles, ROI-based test selection, behavior-first approach, test skeleton specification" + size: medium + key-references: + - "Test Pyramid - Mike Cohn" + - "Behavior-Driven Development" + sections: + - "References" + - "Test Type Definition and Limits" + - "Behavior-First Principle" + - "ROI Calculation" + - "Test Skeleton Specification" + - "EARS Format Mapping" + - "Test File Naming Convention" + - "Review Criteria" + - "Quality Standards" + + subagents-orchestration-guide: + skill: "subagents-orchestration-guide" + tags: [orchestration, workflow, subagents, autonomous-execution, planning, design-flow, implementation-flow] + typical-use: "Orchestrating subagents through implementation workflows, scale determination, stop points, autonomous execution mode" + size: large + key-references: + - "Orchestrator Pattern" + - "Conductor Pattern" + sections: + - "Role: The Orchestrator" + - "Decision Flow When Receiving Tasks" + - "Available Subagents" + - "Orchestration Principles" + - "Constraints Between Subagents" + - "Explicit Stop Points" + - "Scale Determination and Document Requirements" + - "How to Call Subagents" + - "Structured Response Specification" + - 
"Handling Requirement Changes" + - "Basic Flow for Work Planning" + - "Autonomous Execution Mode" + - "Main Orchestrator Roles" + - "Important Constraints" + - "Required Dialogue Points with Humans" + - "Action Checklist" + - "References" + + # Frontend-Specific Skills + typescript-rules: + skill: "typescript-rules" + tags: [frontend, react, typescript, function-components, props-driven, type-safety, environment-variables, security, state-management] + typical-use: "Frontend TypeScript development rules, React function components, Props-driven design, type safety, state management patterns" + size: large + key-references: + - "React Function Components" + - "TypeScript Best Practices" + - "Props-Driven Design" + sections: + - "Basic Principles" + - "Comment Writing Rules" + - "Type Safety" + - "Coding Conventions" + - "Error Handling" + - "Refactoring Techniques" + - "Performance Optimization" + - "Non-functional Requirements" + + test-implement: + skill: "test-implement" + tags: [testing, frontend, react, react-testing-library, msw, playwright, e2e, coverage, tdd] + typical-use: "Test implementation patterns. 
references/frontend.md for RTL+Vitest+MSW, references/e2e.md for Playwright E2E" + size: large + key-references: + - "references/frontend.md" + - "references/e2e.md" + sections: + - "Reference Selection" + - "Common Principles" + + frontend-ai-guide: + skill: "frontend-ai-guide" + tags: [frontend, react, anti-patterns, technical-judgment, component-design, testing, quality-commands] + typical-use: "Frontend technical decision criteria, React anti-pattern detection, component quality check workflows" + size: large + key-references: + - "Rule of Three - Martin Fowler" + - "React Best Practices" + sections: + - "Technical Anti-patterns (Red Flag Patterns)" + - "Fallback Design Principles" + - "Rule of Three - Criteria for Code Duplication" + - "Common Failure Patterns and Avoidance Methods" + - "Debugging Techniques" + - "Quality Check Workflow" + - "Situations Requiring Technical Decisions" + - "Implementation Completeness Assurance" + - "Impact Analysis" diff --git a/dev-workflows-frontend/skills/test-implement/SKILL.md b/dev-workflows-frontend/skills/test-implement/SKILL.md new file mode 100644 index 0000000..ce67d77 --- /dev/null +++ b/dev-workflows-frontend/skills/test-implement/SKILL.md @@ -0,0 +1,30 @@ +--- +name: test-implement +description: Test implementation patterns and conventions. Use when implementing unit tests, integration tests, or E2E tests, including RTL+Vitest+MSW component testing and Playwright E2E testing. 
+--- + +# Test Implementation Patterns + +## Reference Selection + +| Test Type | Reference | When to Use | +|-----------|-----------|-------------| +| **Unit / Integration** | [references/frontend.md](references/frontend.md) | Implementing React component tests with RTL + Vitest + MSW | +| **E2E** | [references/e2e.md](references/e2e.md) | Implementing browser-level E2E tests with Playwright | + +## Common Principles + +### AAA Structure +All tests follow **Arrange-Act-Assert**: +- **Arrange**: Set up preconditions and inputs +- **Act**: Execute the behavior under test +- **Assert**: Verify the expected outcome + +### Test Independence +- Each test runs independently without depending on other tests +- No shared mutable state between tests +- Deterministic execution — no random or time dependencies without mocking + +### Naming +- Test names describe expected behavior from user perspective +- One test verifies one behavior diff --git a/dev-workflows-frontend/skills/test-implement/references/e2e.md b/dev-workflows-frontend/skills/test-implement/references/e2e.md new file mode 100644 index 0000000..573f765 --- /dev/null +++ b/dev-workflows-frontend/skills/test-implement/references/e2e.md @@ -0,0 +1,252 @@ +# E2E Test Implementation with Playwright + +## Test Framework +- **Playwright Test**: `@playwright/test` +- Test imports: `import { test, expect } from '@playwright/test'` + +## Test Structure + +### Directory Layout +``` +tests/ +└── e2e/ + ├── pages/ # Page objects + │ ├── login.page.ts + │ └── dashboard.page.ts + ├── fixtures/ # Test fixtures + │ └── auth.fixture.ts + └── *.e2e.test.ts # Test files +``` + +### Naming Conventions +- Test files: `{FeatureName}.e2e.test.ts` +- Page objects: `{PageName}.page.ts` +- Fixtures: `{Purpose}.fixture.ts` + +## Page Object Pattern + +Encapsulate page interactions for reusability and maintainability: + +```typescript +import { type Page, type Locator } from '@playwright/test' + +export class LoginPage { + readonly 
emailInput: Locator + readonly passwordInput: Locator + readonly submitButton: Locator + + constructor(private page: Page) { + this.emailInput = page.getByLabel('Email') + this.passwordInput = page.getByLabel('Password') + this.submitButton = page.getByRole('button', { name: 'Sign in' }) + } + + async login(email: string, password: string) { + await this.emailInput.fill(email) + await this.passwordInput.fill(password) + await this.submitButton.click() + } +} +``` + +## Test Patterns + +### Basic Test +```typescript +import { test, expect } from '@playwright/test' + +test('user can navigate to dashboard after login', async ({ page }) => { + // Arrange + await page.goto('/login') + + // Act + await page.getByLabel('Email').fill('user@example.com') + await page.getByLabel('Password').fill('password') + await page.getByRole('button', { name: 'Sign in' }).click() + + // Assert + await expect(page).toHaveURL('/dashboard') + await expect(page.getByRole('heading', { name: 'Dashboard' })).toBeVisible() +}) +``` + +### With Page Objects +```typescript +import { test, expect } from '@playwright/test' +import { LoginPage } from './pages/login.page' +import { DashboardPage } from './pages/dashboard.page' + +test('user completes purchase flow', async ({ page }) => { + const loginPage = new LoginPage(page) + const dashboardPage = new DashboardPage(page) + + await page.goto('/login') + await loginPage.login('user@example.com', 'password') + await expect(dashboardPage.heading).toBeVisible() +}) +``` + +### Auth Fixture +```typescript +import { test as base } from '@playwright/test' + +export const test = base.extend<{ authenticatedPage: Page }>({ + authenticatedPage: async ({ page }, use) => { + await page.goto('/login') + await page.getByLabel('Email').fill('user@example.com') + await page.getByLabel('Password').fill('password') + await page.getByRole('button', { name: 'Sign in' }).click() + await page.waitForURL('/dashboard') + await use(page) + }, +}) +``` + +## E2E Environment 
Prerequisites + +E2E tests require a running application with real data state. Unlike unit/integration tests, environment setup is part of E2E test implementation scope. + +### Seed Data Strategy + +Prepare test data via API calls or database seeding: + +```typescript +// fixtures/seed.fixture.ts +import { test as base } from '@playwright/test' + +export const test = base.extend<{ seededData: SeedResult }>({ + seededData: async ({ request }, use) => { + // Arrange: Create test data via API before test + // Example: adjust to the project's actual seeding mechanism + const result = await request.post('/api/test/seed', { + data: { scenario: 'e2e-user-with-subscription' } + }) + const seedData = await result.json() + + await use(seedData) + + // Cleanup: Remove test data after test + await request.delete(`/api/test/seed/${seedData.id}`) + }, +}) +``` + +**Principles**: +- Use the application's existing seeding mechanism if present; create new seed endpoints only when no alternative exists +- Seed data setup belongs to test fixtures, not to a separate manual step +- Each test must be self-contained: create its own data, clean up after +- Seed data via API endpoints or direct DB access only + +### Authentication Fixture + +Implement auth fixtures that match the application's actual login flow: + +```typescript +// fixtures/auth.fixture.ts +export const test = base.extend<{ playerPage: Page }>({ + playerPage: async ({ page, request }, use) => { + // Use the application's existing auth endpoint — not admin backdoors + // Example: adjust the URL and payload to match the project's actual login flow + await request.post('/api/login', { + data: { loginId: E2E_LOGIN_ID, password: E2E_PASSWORD } + }) + // Transfer session to browser context + await page.goto('/') + await use(page) + }, +}) +``` + +**Principles**: +- Use the application's existing authentication flow; auth fixtures must follow the same path that real users use +- Use the application's production authentication 
flow for E2E auth (the same endpoints real users hit) +- Store test credentials in environment variables only (`E2E_*` prefixed) +- If the auth flow requires specific user records, seed them in the fixture + +### Environment Checklist + +Before E2E tests can pass, verify: +- [ ] Application is running and accessible at `baseURL` +- [ ] Database has required seed data (test users, subscriptions, content) +- [ ] Authentication flow works with test credentials +- [ ] Environment variables are set (`E2E_*` prefixed) +- [ ] External services are either available or mocked via `page.route()` + +When the work plan includes dedicated environment setup tasks (Phase 0), follow those tasks. When no setup tasks exist in the plan, address missing prerequisites as part of the E2E test implementation task itself. + +## Locator Strategy + +Prefer accessible locators in this order: +1. `page.getByRole()` — best for accessibility +2. `page.getByLabel()` — form elements +3. `page.getByText()` — visible text +4. 
`page.getByTestId()` — last resort + +```typescript +await page.getByRole('button', { name: 'Submit' }).click() +``` + +## Assertions + +```typescript +// Visibility +await expect(page.getByText('Success')).toBeVisible() +await expect(page.getByText('Error')).not.toBeVisible() + +// Navigation +await expect(page).toHaveURL('/dashboard') +await expect(page).toHaveTitle('Dashboard') + +// Element state +await expect(page.getByRole('button')).toBeEnabled() +await expect(page.getByRole('button')).toBeDisabled() + +// Content +await expect(page.getByRole('heading')).toHaveText('Welcome') +``` + +## Viewport Testing + +When UI Spec defines responsive behavior: + +```typescript +test.describe('responsive navigation', () => { + test('shows hamburger menu on mobile', async ({ page }) => { + await page.setViewportSize({ width: 375, height: 667 }) + await page.goto('/') + await expect(page.getByRole('button', { name: 'Menu' })).toBeVisible() + await expect(page.getByRole('navigation')).not.toBeVisible() + }) + + test('shows full navigation on desktop', async ({ page }) => { + await page.setViewportSize({ width: 1280, height: 720 }) + await page.goto('/') + await expect(page.getByRole('navigation')).toBeVisible() + }) +}) +``` + +## Test Isolation + +- Each test starts from a clean browser context +- No shared state between tests +- Use `beforeEach` for common setup (auth, navigation) +- Prefer `page.goto()` over in-test navigation for setup steps + +## Skeleton Comment Format + +E2E test skeletons follow the same annotation format as integration tests (adapt comment syntax to the project's language): + +```typescript +// AC: [Original acceptance criteria text] +// Behavior: [User action] → [System response] → [Observable result] +// @category: e2e +// @dependency: full-system +// @complexity: high +// ROI: [score] +test('AC1: [Description]', async ({ page }) => { + // Arrange: [Setup description] + // Act: [Action description] + // Assert: [Verification description] +}) +``` 
diff --git a/dev-workflows-frontend/skills/test-implement/references/frontend.md b/dev-workflows-frontend/skills/test-implement/references/frontend.md new file mode 100644 index 0000000..e605e28 --- /dev/null +++ b/dev-workflows-frontend/skills/test-implement/references/frontend.md @@ -0,0 +1,217 @@ +# Frontend Test Implementation (RTL + Vitest + MSW) + +## Test Framework +- **Vitest**: This project uses Vitest +- **React Testing Library**: For component testing +- **MSW (Mock Service Worker)**: For API mocking +- Test imports: `import { describe, it, expect, beforeEach, vi } from 'vitest'` +- Component test imports: `import { render, screen } from '@testing-library/react'` +- User interaction: `import userEvent from '@testing-library/user-event'` (prefer over `fireEvent`) +- Mock creation: Use `vi.mock()` + +## Basic Testing Policy + +### Quality Requirements +- **Coverage**: Unit test coverage must be 60% or higher (Frontend standard 2025) +- **Independence**: Each test can run independently without depending on other tests +- **Reproducibility**: Tests are environment-independent and always return the same results +- **Readability**: Test code maintains the same quality as production code + +### Coverage Requirements (ADR-0002 Compliant) +**Component-specific targets**: +- Atoms (Button, Text, etc.): 70% or higher +- Molecules (FormField, etc.): 65% or higher +- Organisms (Header, Footer, etc.): 60% or higher +- Custom Hooks: 65% or higher +- Utils: 70% or higher + +**Metrics**: Statements, Branches, Functions, Lines + +### Test Types and Scope +1. **Unit Tests (React Testing Library)** + - Verify behavior of individual components or functions + - Mock all external dependencies + - Most numerous, implemented with fine granularity + - Focus on user-observable behavior + +2. 
**Integration Tests (React Testing Library + MSW)** + - Verify coordination between multiple components + - Mock APIs with MSW (Mock Service Worker) + - No actual DB connections (backend manages DB) + - Verify major functional flows + +## Red-Green-Refactor Process (Test-First Development) + +**Recommended Principle**: Always start code changes with tests + +**Background**: +- Ensure behavior before changes, prevent regression +- Clarify expected behavior before implementation +- Ensure safety during refactoring + +**Development Steps**: +1. **Red**: Write test for expected behavior (it fails) +2. **Green**: Pass test with minimal implementation +3. **Refactor**: Improve code while maintaining passing tests + +**NG Cases (Test-first not required)**: +- Pure configuration file changes (vite.config.ts, tailwind.config.js, etc.) +- Documentation-only updates (README, comments, etc.) +- Emergency production incident response (post-incident tests mandatory) + +## Test Design Principles + +### Test Case Structure +- Tests consist of three stages: "Arrange," "Act," "Assert" +- Clear naming that shows purpose of each test +- One test case verifies only one behavior + +### Test Data Management +- Manage test data in dedicated directories or co-located with tests +- Define test-specific environment variable values +- Always mock sensitive information +- Keep test data minimal, using only data directly related to test case verification purposes + +### Mock and Stub Usage Policy + +**Recommended: Mock external dependencies in unit tests** +- Merit: Ensures test independence and reproducibility +- Practice: Mock API calls with MSW, mock external libraries + +**Use MSW for all API interactions in unit tests**: Ensures speed and environment independence. 
+
+### Test Failure Response Decision Criteria
+
+**Fix tests**: Wrong expected values, references to non-existent features, dependence on implementation details, implementation only for tests
+**Fix implementation**: Valid specifications, business logic, important edge cases
+**When in doubt**: Confirm with user
+
+## Test Helper Utilization Rules
+
+### Decision Criteria
+| Mock Characteristics | Response Policy |
+|---------------------|-----------------|
+| **Simple and stable** | Consolidate in common helpers |
+| **Complex or frequently changing** | Individual implementation |
+| **Duplicated in 3+ places** | Consider consolidation |
+| **Test-specific logic** | Individual implementation |
+
+### Test Helper Usage Examples
+```typescript
+// Builder pattern for test data
+const testUser = createTestUser({ name: 'Test User', email: 'test@example.com' })
+
+// Custom render function with providers
+function renderWithProviders(ui: React.ReactElement) {
+  return render(<Providers>{ui}</Providers>)
+}
+```
+
+## Test Implementation Conventions
+
+### Directory Structure (Co-location Principle)
+```
+src/
+└── components/
+    └── Button/
+        ├── Button.tsx
+        ├── Button.test.tsx  # Co-located with component
+        └── index.ts
+```
+
+### Naming Conventions
+- Test files: `{ComponentName}.test.tsx`
+- Integration test files: `{FeatureName}.integration.test.tsx`
+- Test suites: Names describing target components or features
+- Test cases: Names describing expected behavior from user perspective
+
+### Test Code Quality Rules
+
+**Keep all tests always active**
+- Fix problematic tests and activate them
+
+**Keep all tests executable**: Fix failing tests or delete tests that no longer apply. Remove any `test.skip()` before commit.
+
+## Test Granularity Principles
+
+### Core Principle: User-Observable Behavior Only
+**Test only**: Rendered output, user interactions, accessibility, error states
+
+```typescript
+// Test user-observable behavior
+expect(screen.getByRole('button', { name: 'Submit' })).toBeInTheDocument()
+
+// NOT implementation details
+expect(component.state.count).toBe(0)
+```
+
+## Test Quality Criteria
+
+### Literal Expected Values
+Use hardcoded literal values for assertions.
+```typescript
+expect(formatPrice(1000)).toBe('¥1,000')
+expect(calculateTax(100)).toBe(10)
+expect(user.role).toBe('admin')
+```
+
+### Result-Based Verification
+Verify final results and outcomes.
+```typescript
+expect(mockOnSubmit).toHaveBeenCalledWith({ name: 'test' })
+expect(result).toEqual({ id: '1', status: 'success' })
+expect(screen.getByText('Submitted')).toBeInTheDocument()
+```
+
+### Meaningful Assertions
+Every test must include at least one `expect()` that validates observable behavior.
+
+### Appropriate Mock Scope
+Mock only direct external I/O dependencies. Internal utilities should use real implementations.
+```typescript
+vi.mock('./api/userApi')   // External API - mock
+vi.mock('./lib/database')  // External I/O - mock
+// Internal utils like validators/formatters - use real implementations
+```
+
+## Mock Type Safety Enforcement
+
+### MSW (Mock Service Worker) Setup
+```typescript
+import { http, HttpResponse } from 'msw'
+
+const handlers = [
+  http.get('/api/users/:id', () => {
+    return HttpResponse.json({ id: '1', name: 'John' } satisfies User)
+  })
+]
+```
+
+### Component Mock Type Safety
+```typescript
+type TestProps = Pick<ButtonProps, 'label' | 'onClick'>
+const mockProps: TestProps = { label: 'Click', onClick: vi.fn() }
+```
+
+## Continuity Test Scope
+
+Limited to verifying existing feature impact when adding new features. Long-term operations and performance testing are infrastructure responsibilities, not test scope.
+ +## Basic React Testing Library Example + +```typescript +import { describe, it, expect, vi } from 'vitest' +import { render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' +import { Button } from './Button' + +describe('Button', () => { + it('should call onClick when clicked', async () => { + const user = userEvent.setup() + const onClick = vi.fn() + render(