From d9188637ba04887f394cc5c089d1424d91fcfb65 Mon Sep 17 00:00:00 2001 From: Koosha Paridehpour Date: Sun, 1 Mar 2026 06:38:36 -0700 Subject: [PATCH 01/10] chore: remove tracked AI artifact files Co-authored-by: Codex --- .gitignore | 23 +++++++++++++++++++++++ .kittify/.dashboard | 8 ++++---- .kittify/metadata.yaml | 2 +- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 14fe46c937..e0325ed98b 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,26 @@ _bmad-output/* *.bak # Local worktree shelves (canonical checkout must stay clean) PROJECT-wtrees/ + +# Added by Spec Kitty CLI (auto-managed) +.opencode/ +.windsurf/ +.qwen/ +.augment/ +.roo/ +.amazonq/ +.github/copilot/ +.kittify/.dashboard + + +# AI tool artifacts +.claude/ +.codex/ +.cursor/ +.gemini/ +.kittify/ +.kilocode/ +.github/prompts/ +.github/copilot-instructions.md +.claudeignore +.llmignore diff --git a/.kittify/.dashboard b/.kittify/.dashboard index 58cb04eb89..5ca685a761 100644 --- a/.kittify/.dashboard +++ b/.kittify/.dashboard @@ -1,4 +1,4 @@ -http://127.0.0.1:9240 -9240 -eb37ee458d7c4b394e25b2661d00dcee -14406 +http://127.0.0.1:9243 +9243 +7cdbc40f9431d8d2cc5d2e418e2d4346 +2216 diff --git a/.kittify/metadata.yaml b/.kittify/metadata.yaml index 928d221c98..4b1af3ec58 100644 --- a/.kittify/metadata.yaml +++ b/.kittify/metadata.yaml @@ -4,7 +4,7 @@ spec_kitty: version: 0.14.2 - initialized_at: '2026-02-27T01:11:17.441920' + initialized_at: '2026-02-28T05:32:34.112867' last_upgraded_at: null environment: python_version: 3.14.0 From 77621d70479e76b7d42884735bbc47cc7540428f Mon Sep 17 00:00:00 2001 From: Koosha Paridehpour Date: Sun, 1 Mar 2026 07:05:32 -0700 Subject: [PATCH 02/10] chore(artifacts): remove stale AI tooling artifacts Co-authored-by: Codex --- .claudeignore | 58 -- .cursor/commands/spec-kitty.accept.md | 76 -- .cursor/commands/spec-kitty.analyze.md | 184 ---- .cursor/commands/spec-kitty.checklist.md | 287 ------ 
.cursor/commands/spec-kitty.clarify.md | 157 ---- .cursor/commands/spec-kitty.constitution.md | 433 --------- .cursor/commands/spec-kitty.dashboard.md | 37 - .cursor/commands/spec-kitty.implement.md | 61 -- .cursor/commands/spec-kitty.merge.md | 384 -------- .cursor/commands/spec-kitty.plan.md | 205 ----- .cursor/commands/spec-kitty.research.md | 86 -- .cursor/commands/spec-kitty.review.md | 33 - .cursor/commands/spec-kitty.specify.md | 328 ------- .cursor/commands/spec-kitty.status.md | 93 -- .cursor/commands/spec-kitty.tasks.md | 577 ------------ .cursorignore | 55 -- .github/copilot-instructions.md | 12 - .github/prompts/spec-kitty.accept.prompt.md | 76 -- .github/prompts/spec-kitty.analyze.prompt.md | 184 ---- .../prompts/spec-kitty.checklist.prompt.md | 287 ------ .github/prompts/spec-kitty.clarify.prompt.md | 157 ---- .../prompts/spec-kitty.constitution.prompt.md | 433 --------- .../prompts/spec-kitty.dashboard.prompt.md | 37 - .../prompts/spec-kitty.implement.prompt.md | 61 -- .github/prompts/spec-kitty.merge.prompt.md | 384 -------- .github/prompts/spec-kitty.plan.prompt.md | 205 ----- .github/prompts/spec-kitty.research.prompt.md | 86 -- .github/prompts/spec-kitty.review.prompt.md | 33 - .github/prompts/spec-kitty.specify.prompt.md | 328 ------- .github/prompts/spec-kitty.status.prompt.md | 93 -- .github/prompts/spec-kitty.tasks.prompt.md | 577 ------------ .kilocode/workflows/spec-kitty.accept.md | 76 -- .kilocode/workflows/spec-kitty.analyze.md | 184 ---- .kilocode/workflows/spec-kitty.checklist.md | 287 ------ .kilocode/workflows/spec-kitty.clarify.md | 157 ---- .../workflows/spec-kitty.constitution.md | 433 --------- .kilocode/workflows/spec-kitty.dashboard.md | 37 - .kilocode/workflows/spec-kitty.implement.md | 61 -- .kilocode/workflows/spec-kitty.merge.md | 384 -------- .kilocode/workflows/spec-kitty.plan.md | 205 ----- .kilocode/workflows/spec-kitty.research.md | 86 -- .kilocode/workflows/spec-kitty.review.md | 33 - 
.kilocode/workflows/spec-kitty.specify.md | 328 ------- .kilocode/workflows/spec-kitty.status.md | 93 -- .kilocode/workflows/spec-kitty.tasks.md | 577 ------------ .kittify/.dashboard | 4 - .kittify/metadata.yaml | 14 - .../command-templates/implement.md | 337 ------- .../documentation/command-templates/plan.md | 275 ------ .../documentation/command-templates/review.md | 344 ------- .../command-templates/specify.md | 206 ----- .../documentation/command-templates/tasks.md | 189 ---- .kittify/missions/documentation/mission.yaml | 115 --- .../templates/divio/explanation-template.md | 192 ---- .../templates/divio/howto-template.md | 168 ---- .../templates/divio/reference-template.md | 179 ---- .../templates/divio/tutorial-template.md | 146 --- .../templates/generators/jsdoc.json.template | 18 - .../generators/sphinx-conf.py.template | 36 - .../documentation/templates/plan-template.md | 269 ------ .../templates/release-template.md | 222 ----- .../documentation/templates/spec-template.md | 172 ---- .../templates/task-prompt-template.md | 140 --- .../documentation/templates/tasks-template.md | 159 ---- .../research/command-templates/implement.md | 255 ------ .../research/command-templates/merge.md | 388 -------- .../research/command-templates/plan.md | 125 --- .../research/command-templates/review.md | 191 ---- .../research/command-templates/specify.md | 220 ----- .../research/command-templates/tasks.md | 225 ----- .kittify/missions/research/mission.yaml | 115 --- .../research/templates/data-model-template.md | 33 - .../research/templates/plan-template.md | 191 ---- .../research/templates/research-template.md | 35 - .../templates/research/evidence-log.csv | 18 - .../templates/research/source-register.csv | 18 - .../research/templates/spec-template.md | 64 -- .../templates/task-prompt-template.md | 148 --- .../research/templates/tasks-template.md | 114 --- .../software-dev/command-templates/accept.md | 75 -- .../software-dev/command-templates/analyze.md | 183 ---- 
.../command-templates/checklist.md | 286 ------ .../software-dev/command-templates/clarify.md | 156 ---- .../command-templates/constitution.md | 432 --------- .../command-templates/dashboard.md | 36 - .../command-templates/implement.md | 60 -- .../software-dev/command-templates/merge.md | 383 -------- .../software-dev/command-templates/plan.md | 204 ----- .../software-dev/command-templates/review.md | 32 - .../software-dev/command-templates/specify.md | 327 ------- .../software-dev/command-templates/tasks.md | 576 ------------ .kittify/missions/software-dev/mission.yaml | 100 -- .../software-dev/templates/plan-template.md | 132 --- .../software-dev/templates/spec-template.md | 116 --- .../templates/task-prompt-template.md | 140 --- .../software-dev/templates/tasks-template.md | 159 ---- .kittify/scripts/debug-dashboard-scan.py | 61 -- .kittify/scripts/tasks/acceptance_core.py | 831 ----------------- .kittify/scripts/tasks/acceptance_support.py | 168 ---- .kittify/scripts/tasks/task_helpers.py | 103 --- .kittify/scripts/tasks/task_helpers_shared.py | 757 ---------------- .kittify/scripts/tasks/tasks_cli.py | 853 ------------------ .kittify/scripts/validate_encoding.py | 180 ---- .llmignore | 58 -- 104 files changed, 20681 deletions(-) delete mode 100644 .claudeignore delete mode 100644 .cursor/commands/spec-kitty.accept.md delete mode 100644 .cursor/commands/spec-kitty.analyze.md delete mode 100644 .cursor/commands/spec-kitty.checklist.md delete mode 100644 .cursor/commands/spec-kitty.clarify.md delete mode 100644 .cursor/commands/spec-kitty.constitution.md delete mode 100644 .cursor/commands/spec-kitty.dashboard.md delete mode 100644 .cursor/commands/spec-kitty.implement.md delete mode 100644 .cursor/commands/spec-kitty.merge.md delete mode 100644 .cursor/commands/spec-kitty.plan.md delete mode 100644 .cursor/commands/spec-kitty.research.md delete mode 100644 .cursor/commands/spec-kitty.review.md delete mode 100644 .cursor/commands/spec-kitty.specify.md delete mode 
100644 .cursor/commands/spec-kitty.status.md delete mode 100644 .cursor/commands/spec-kitty.tasks.md delete mode 100644 .cursorignore delete mode 100644 .github/copilot-instructions.md delete mode 100644 .github/prompts/spec-kitty.accept.prompt.md delete mode 100644 .github/prompts/spec-kitty.analyze.prompt.md delete mode 100644 .github/prompts/spec-kitty.checklist.prompt.md delete mode 100644 .github/prompts/spec-kitty.clarify.prompt.md delete mode 100644 .github/prompts/spec-kitty.constitution.prompt.md delete mode 100644 .github/prompts/spec-kitty.dashboard.prompt.md delete mode 100644 .github/prompts/spec-kitty.implement.prompt.md delete mode 100644 .github/prompts/spec-kitty.merge.prompt.md delete mode 100644 .github/prompts/spec-kitty.plan.prompt.md delete mode 100644 .github/prompts/spec-kitty.research.prompt.md delete mode 100644 .github/prompts/spec-kitty.review.prompt.md delete mode 100644 .github/prompts/spec-kitty.specify.prompt.md delete mode 100644 .github/prompts/spec-kitty.status.prompt.md delete mode 100644 .github/prompts/spec-kitty.tasks.prompt.md delete mode 100644 .kilocode/workflows/spec-kitty.accept.md delete mode 100644 .kilocode/workflows/spec-kitty.analyze.md delete mode 100644 .kilocode/workflows/spec-kitty.checklist.md delete mode 100644 .kilocode/workflows/spec-kitty.clarify.md delete mode 100644 .kilocode/workflows/spec-kitty.constitution.md delete mode 100644 .kilocode/workflows/spec-kitty.dashboard.md delete mode 100644 .kilocode/workflows/spec-kitty.implement.md delete mode 100644 .kilocode/workflows/spec-kitty.merge.md delete mode 100644 .kilocode/workflows/spec-kitty.plan.md delete mode 100644 .kilocode/workflows/spec-kitty.research.md delete mode 100644 .kilocode/workflows/spec-kitty.review.md delete mode 100644 .kilocode/workflows/spec-kitty.specify.md delete mode 100644 .kilocode/workflows/spec-kitty.status.md delete mode 100644 .kilocode/workflows/spec-kitty.tasks.md delete mode 100644 .kittify/.dashboard delete mode 100644 
.kittify/metadata.yaml delete mode 100644 .kittify/missions/documentation/command-templates/implement.md delete mode 100644 .kittify/missions/documentation/command-templates/plan.md delete mode 100644 .kittify/missions/documentation/command-templates/review.md delete mode 100644 .kittify/missions/documentation/command-templates/specify.md delete mode 100644 .kittify/missions/documentation/command-templates/tasks.md delete mode 100644 .kittify/missions/documentation/mission.yaml delete mode 100644 .kittify/missions/documentation/templates/divio/explanation-template.md delete mode 100644 .kittify/missions/documentation/templates/divio/howto-template.md delete mode 100644 .kittify/missions/documentation/templates/divio/reference-template.md delete mode 100644 .kittify/missions/documentation/templates/divio/tutorial-template.md delete mode 100644 .kittify/missions/documentation/templates/generators/jsdoc.json.template delete mode 100644 .kittify/missions/documentation/templates/generators/sphinx-conf.py.template delete mode 100644 .kittify/missions/documentation/templates/plan-template.md delete mode 100644 .kittify/missions/documentation/templates/release-template.md delete mode 100644 .kittify/missions/documentation/templates/spec-template.md delete mode 100644 .kittify/missions/documentation/templates/task-prompt-template.md delete mode 100644 .kittify/missions/documentation/templates/tasks-template.md delete mode 100644 .kittify/missions/research/command-templates/implement.md delete mode 100644 .kittify/missions/research/command-templates/merge.md delete mode 100644 .kittify/missions/research/command-templates/plan.md delete mode 100644 .kittify/missions/research/command-templates/review.md delete mode 100644 .kittify/missions/research/command-templates/specify.md delete mode 100644 .kittify/missions/research/command-templates/tasks.md delete mode 100644 .kittify/missions/research/mission.yaml delete mode 100644 
.kittify/missions/research/templates/data-model-template.md delete mode 100644 .kittify/missions/research/templates/plan-template.md delete mode 100644 .kittify/missions/research/templates/research-template.md delete mode 100644 .kittify/missions/research/templates/research/evidence-log.csv delete mode 100644 .kittify/missions/research/templates/research/source-register.csv delete mode 100644 .kittify/missions/research/templates/spec-template.md delete mode 100644 .kittify/missions/research/templates/task-prompt-template.md delete mode 100644 .kittify/missions/research/templates/tasks-template.md delete mode 100644 .kittify/missions/software-dev/command-templates/accept.md delete mode 100644 .kittify/missions/software-dev/command-templates/analyze.md delete mode 100644 .kittify/missions/software-dev/command-templates/checklist.md delete mode 100644 .kittify/missions/software-dev/command-templates/clarify.md delete mode 100644 .kittify/missions/software-dev/command-templates/constitution.md delete mode 100644 .kittify/missions/software-dev/command-templates/dashboard.md delete mode 100644 .kittify/missions/software-dev/command-templates/implement.md delete mode 100644 .kittify/missions/software-dev/command-templates/merge.md delete mode 100644 .kittify/missions/software-dev/command-templates/plan.md delete mode 100644 .kittify/missions/software-dev/command-templates/review.md delete mode 100644 .kittify/missions/software-dev/command-templates/specify.md delete mode 100644 .kittify/missions/software-dev/command-templates/tasks.md delete mode 100644 .kittify/missions/software-dev/mission.yaml delete mode 100644 .kittify/missions/software-dev/templates/plan-template.md delete mode 100644 .kittify/missions/software-dev/templates/spec-template.md delete mode 100644 .kittify/missions/software-dev/templates/task-prompt-template.md delete mode 100644 .kittify/missions/software-dev/templates/tasks-template.md delete mode 100644 .kittify/scripts/debug-dashboard-scan.py delete 
mode 100644 .kittify/scripts/tasks/acceptance_core.py delete mode 100644 .kittify/scripts/tasks/acceptance_support.py delete mode 100644 .kittify/scripts/tasks/task_helpers.py delete mode 100644 .kittify/scripts/tasks/task_helpers_shared.py delete mode 100644 .kittify/scripts/tasks/tasks_cli.py delete mode 100644 .kittify/scripts/validate_encoding.py delete mode 100644 .llmignore diff --git a/.claudeignore b/.claudeignore deleted file mode 100644 index 5391107261..0000000000 --- a/.claudeignore +++ /dev/null @@ -1,58 +0,0 @@ -# Spec Kitty Configuration and Templates -# These are internal directories that shouldn't be scanned by AI assistants - -# Template directories (not working code) -.kittify/templates/ -.kittify/missions/ -.kittify/scripts/ - -# Agent command directories (generated from templates, not source) -.claude/ -.codex/ -.gemini/ -.cursor/ -.qwen/ -.opencode/ -.windsurf/ -.kilocode/ -.augment/ -.roo/ -.amazonq/ -.github/copilot/ - -# Git metadata -.git/ - -# Build artifacts and caches -__pycache__/ -*.pyc -*.pyo -.pytest_cache/ -.coverage -htmlcov/ -node_modules/ -dist/ -build/ -*.egg-info/ - -# Virtual environments -.venv/ -venv/ -env/ - -# OS-specific files -.DS_Store -Thumbs.db -desktop.ini - -# IDE directories -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# Logs and databases -*.log -*.db -*.sqlite diff --git a/.cursor/commands/spec-kitty.accept.md b/.cursor/commands/spec-kitty.accept.md deleted file mode 100644 index 9ce09b7be5..0000000000 --- a/.cursor/commands/spec-kitty.accept.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Validate feature readiness and guide final acceptance steps. ---- - - -# /spec-kitty.accept - Validate Feature Readiness - -**Version**: 0.11.0+ -**Purpose**: Validate all work packages are complete and feature is ready to merge. - -## 📍 WORKING DIRECTORY: Run from MAIN repository - -**IMPORTANT**: Accept runs from the main repository root, NOT from a WP worktree. 
- -```bash -# If you're in a worktree, return to main first: -cd $(git rev-parse --show-toplevel) - -# Then run accept: -spec-kitty accept -``` - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery (mandatory) - -Before running the acceptance workflow, gather the following: - -1. **Feature slug** (e.g., `005-awesome-thing`). If omitted, detect automatically. -2. **Acceptance mode**: - - `pr` when the feature will merge via hosted pull request. - - `local` when the feature will merge locally without a PR. - - `checklist` to run the readiness checklist without committing or producing merge instructions. -3. **Validation commands executed** (tests/builds). Collect each command verbatim; omit if none. -4. **Acceptance actor** (optional, defaults to the current agent name). - -Ask one focused question per item and confirm the summary before continuing. End the discovery turn with `WAITING_FOR_ACCEPTANCE_INPUT` until all answers are provided. - -## Execution Plan - -1. Compile the acceptance options into an argument list: - - Always include `--actor "cursor"`. - - Append `--feature ""` when the user supplied a slug. - - Append `--mode ` (`pr`, `local`, or `checklist`). - - Append `--test ""` for each validation command provided. -2. Run `(Missing script command for sh)` (the CLI wrapper) with the assembled arguments **and** `--json`. -3. Parse the JSON response. It contains: - - `summary.ok` (boolean) and other readiness details. - - `summary.outstanding` categories when issues remain. - - `instructions` (merge steps) and `cleanup_instructions`. - - `notes` (e.g., acceptance commit hash). -4. Present the outcome: - - If `summary.ok` is `false`, list each outstanding category with bullet points and advise the user to resolve them before retrying acceptance. - - If `summary.ok` is `true`, display: - - Acceptance timestamp, actor, and (if present) acceptance commit hash. 
- - Merge instructions and cleanup instructions as ordered steps. - - Validation commands executed (if any). -5. When the mode is `checklist`, make it clear no commits or merge instructions were produced. - -## Output Requirements - -- Summaries must be in plain text (no tables). Use short bullet lists for instructions. -- Surface outstanding issues before any congratulations or success messages. -- If the JSON payload includes warnings, surface them under an explicit **Warnings** section. -- Never fabricate results; only report what the JSON contains. - -## Error Handling - -- If the command fails or returns invalid JSON, report the failure and request user guidance (do not retry automatically). -- When outstanding issues exist, do **not** attempt to force acceptance—return the checklist and prompt the user to fix the blockers. diff --git a/.cursor/commands/spec-kitty.analyze.md b/.cursor/commands/spec-kitty.analyze.md deleted file mode 100644 index e2cd797d48..0000000000 --- a/.cursor/commands/spec-kitty.analyze.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation. ---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Goal - -Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`. - -## Operating Constraints - -**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually). 
- -**Constitution Authority**: The project constitution (`/.kittify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`. - -## Execution Steps - -### 1. Initialize Analysis Context - -Run `(Missing script command for sh)` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths: - -- SPEC = FEATURE_DIR/spec.md -- PLAN = FEATURE_DIR/plan.md -- TASKS = FEATURE_DIR/tasks.md - -Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command). - -### 2. Load Artifacts (Progressive Disclosure) - -Load only the minimal necessary context from each artifact: - -**From spec.md:** - -- Overview/Context -- Functional Requirements -- Non-Functional Requirements -- User Stories -- Edge Cases (if present) - -**From plan.md:** - -- Architecture/stack choices -- Data Model references -- Phases -- Technical constraints - -**From tasks.md:** - -- Task IDs -- Descriptions -- Phase grouping -- Parallel markers [P] -- Referenced file paths - -**From constitution:** - -- Load `/.kittify/memory/constitution.md` for principle validation - -### 3. 
Build Semantic Models - -Create internal representations (do not include raw artifacts in output): - -- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`) -- **User story/action inventory**: Discrete user actions with acceptance criteria -- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases) -- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements - -### 4. Detection Passes (Token-Efficient Analysis) - -Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary. - -#### A. Duplication Detection - -- Identify near-duplicate requirements -- Mark lower-quality phrasing for consolidation - -#### B. Ambiguity Detection - -- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria -- Flag unresolved placeholders (TODO, TKTK, ???, ``, etc.) - -#### C. Underspecification - -- Requirements with verbs but missing object or measurable outcome -- User stories missing acceptance criteria alignment -- Tasks referencing files or components not defined in spec/plan - -#### D. Constitution Alignment - -- Any requirement or plan element conflicting with a MUST principle -- Missing mandated sections or quality gates from constitution - -#### E. Coverage Gaps - -- Requirements with zero associated tasks -- Tasks with no mapped requirement/story -- Non-functional requirements not reflected in tasks (e.g., performance, security) - -#### F. 
Inconsistency - -- Terminology drift (same concept named differently across files) -- Data entities referenced in plan but absent in spec (or vice versa) -- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note) -- Conflicting requirements (e.g., one requires Next.js while other specifies Vue) - -### 5. Severity Assignment - -Use this heuristic to prioritize findings: - -- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality -- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion -- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case -- **LOW**: Style/wording improvements, minor redundancy not affecting execution order - -### 6. Produce Compact Analysis Report - -Output a Markdown report (no file writes) with the following structure: - -## Specification Analysis Report - -| ID | Category | Severity | Location(s) | Summary | Recommendation | -|----|----------|----------|-------------|---------|----------------| -| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version | - -(Add one row per finding; generate stable IDs prefixed by category initial.) - -**Coverage Summary Table:** - -| Requirement Key | Has Task? | Task IDs | Notes | -|-----------------|-----------|----------|-------| - -**Constitution Alignment Issues:** (if any) - -**Unmapped Tasks:** (if any) - -**Metrics:** - -- Total Requirements -- Total Tasks -- Coverage % (requirements with >=1 task) -- Ambiguity Count -- Duplication Count -- Critical Issues Count - -### 7. 
Provide Next Actions - -At end of report, output a concise Next Actions block: - -- If CRITICAL issues exist: Recommend resolving before `/implement` -- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions -- Provide explicit command suggestions: e.g., "Run /spec-kitty.specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'" - -### 8. Offer Remediation - -Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.) - -## Operating Principles - -### Context Efficiency - -- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation -- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis -- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow -- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts - -### Analysis Guidelines - -- **NEVER modify files** (this is read-only analysis) -- **NEVER hallucinate missing sections** (if absent, report them accurately) -- **Prioritize constitution violations** (these are always CRITICAL) -- **Use examples over exhaustive rules** (cite specific instances, not generic patterns) -- **Report zero issues gracefully** (emit success report with coverage statistics) - -## Context - -$ARGUMENTS diff --git a/.cursor/commands/spec-kitty.checklist.md b/.cursor/commands/spec-kitty.checklist.md deleted file mode 100644 index 97228e12f3..0000000000 --- a/.cursor/commands/spec-kitty.checklist.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -description: Generate a custom checklist for the current feature based on user requirements. ---- - - -## Checklist Purpose: "Unit Tests for English" - -**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain. 
- -**NOT for verification/testing**: -- ❌ NOT "Verify the button clicks correctly" -- ❌ NOT "Test error handling works" -- ❌ NOT "Confirm the API returns 200" -- ❌ NOT checking if code/implementation matches the spec - -**FOR requirements quality validation**: -- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness) -- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity) -- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency) -- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage) -- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases) - -**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Execution Steps - -1. **Setup**: Run `(Missing script command for sh)` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list. - - All file paths must be absolute. - -2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST: - - Be generated from the user's phrasing + extracted signals from spec/plan/tasks - - Only ask about information that materially changes checklist content - - Be skipped individually if already unambiguous in `$ARGUMENTS` - - Prefer precision over breadth - - Generation algorithm: - 1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts"). - 2. Cluster signals into candidate focus areas (max 4) ranked by relevance. - 3. 
Identify probable audience & timing (author, reviewer, QA, release) if not explicit. - 4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria. - 5. Formulate questions chosen from these archetypes: - - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?") - - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?") - - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?") - - Audience framing (e.g., "Will this be used by the author only or peers during PR review?") - - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?") - - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?") - - Question formatting rules: - - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters - - Limit to A–E options maximum; omit table if a free-form answer is clearer - - Never ask the user to restate what they already said - - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope." - - Defaults when interaction impossible: - - Depth: Standard - - Audience: Reviewer (PR) if code-related; Author otherwise - - Focus: Top 2 relevance clusters - - Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more. - -3. 
**Understand user request**: Combine `$ARGUMENTS` + clarifying answers: - - Derive checklist theme (e.g., security, review, deploy, ux) - - Consolidate explicit must-have items mentioned by user - - Map focus selections to category scaffolding - - Infer any missing context from spec/plan/tasks (do NOT hallucinate) - -4. **Load feature context**: Read from FEATURE_DIR: - - spec.md: Feature requirements and scope - - plan.md (if exists): Technical details, dependencies - - tasks.md (if exists): Implementation tasks - - **Context Loading Strategy**: - - Load only necessary portions relevant to active focus areas (avoid full-file dumping) - - Prefer summarizing long sections into concise scenario/requirement bullets - - Use progressive disclosure: add follow-on retrieval only if gaps detected - - If source docs are large, generate interim summary items instead of embedding raw text - -5. **Generate checklist** - Create "Unit Tests for Requirements": - - Create `FEATURE_DIR/checklists/` directory if it doesn't exist - - Generate unique checklist filename: - - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`) - - Format: `[domain].md` - - If file exists, append to existing file - - Number items sequentially starting from CHK001 - - Each `/spec-kitty.checklist` run creates a NEW file (never overwrites existing checklists) - - **CORE PRINCIPLE - Test the Requirements, Not the Implementation**: - Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for: - - **Completeness**: Are all necessary requirements present? - - **Clarity**: Are requirements unambiguous and specific? - - **Consistency**: Do requirements align with each other? - - **Measurability**: Can requirements be objectively verified? - - **Coverage**: Are all scenarios/edge cases addressed? - - **Category Structure** - Group items by requirement quality dimensions: - - **Requirement Completeness** (Are all necessary requirements documented?) 
- - **Requirement Clarity** (Are requirements specific and unambiguous?) - - **Requirement Consistency** (Do requirements align without conflicts?) - - **Acceptance Criteria Quality** (Are success criteria measurable?) - - **Scenario Coverage** (Are all flows/cases addressed?) - - **Edge Case Coverage** (Are boundary conditions defined?) - - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?) - - **Dependencies & Assumptions** (Are they documented and validated?) - - **Ambiguities & Conflicts** (What needs clarification?) - - **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**: - - ❌ **WRONG** (Testing implementation): - - "Verify landing page displays 3 episode cards" - - "Test hover states work on desktop" - - "Confirm logo click navigates home" - - ✅ **CORRECT** (Testing requirements quality): - - "Are the exact number and layout of featured episodes specified?" [Completeness] - - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity] - - "Are hover state requirements consistent across all interactive elements?" [Consistency] - - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage] - - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases] - - "Are loading states defined for asynchronous episode data?" [Completeness] - - "Does the spec define visual hierarchy for competing UI elements?" [Clarity] - - **ITEM STRUCTURE**: - Each item should follow this pattern: - - Question format asking about requirement quality - - Focus on what's WRITTEN (or not written) in the spec/plan - - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.] - - Reference spec section `[Spec §X.Y]` when checking existing requirements - - Use `[Gap]` marker when checking for missing requirements - - **EXAMPLES BY QUALITY DIMENSION**: - - Completeness: - - "Are error handling requirements defined for all API failure modes? 
[Gap]" - - "Are accessibility requirements specified for all interactive elements? [Completeness]" - - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]" - - Clarity: - - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]" - - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]" - - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]" - - Consistency: - - "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]" - - "Are card component requirements consistent between landing and detail pages? [Consistency]" - - Coverage: - - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]" - - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]" - - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]" - - Measurability: - - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]" - - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]" - - **Scenario Classification & Coverage** (Requirements Quality Focus): - - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios - - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?" - - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]" - - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]" - - **Traceability Requirements**: - - MINIMUM: ≥80% of items MUST include at least one traceability reference - - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]` - - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? 
[Traceability]" - - **Surface & Resolve Issues** (Requirements Quality Problems): - Ask questions about the requirements themselves: - - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]" - - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]" - - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]" - - Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]" - - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]" - - **Content Consolidation**: - - Soft cap: If raw candidate items > 40, prioritize by risk/impact - - Merge near-duplicates checking the same requirement aspect - - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]" - - **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test: - - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior - - ❌ References to code execution, user actions, system behavior - - ❌ "Displays correctly", "works properly", "functions as expected" - - ❌ "Click", "navigate", "render", "load", "execute" - - ❌ Test cases, test plans, QA procedures - - ❌ Implementation details (frameworks, APIs, algorithms) - - **✅ REQUIRED PATTERNS** - These test requirements quality: - - ✅ "Are [requirement type] defined/specified/documented for [scenario]?" - - ✅ "Is [vague term] quantified/clarified with specific criteria?" - - ✅ "Are requirements consistent between [section A] and [section B]?" - - ✅ "Can [requirement] be objectively measured/verified?" - - ✅ "Are [edge cases/scenarios] addressed in requirements?" - - ✅ "Does the spec define [missing aspect]?" - -6. **Structure Reference**: Generate the checklist following the canonical template in `.kittify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. 
If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### ` lines with globally incrementing IDs starting at CHK001. - -7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize: - - Focus areas selected - - Depth level - - Actor/timing - - Any explicit user-specified must-have items incorporated - -**Important**: Each `/spec-kitty.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. This allows: - -- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`) -- Simple, memorable filenames that indicate checklist purpose -- Easy identification and navigation in the `checklists/` folder - -To avoid clutter, use descriptive types and clean up obsolete checklists when done. - -## Example Checklist Types & Sample Items - -**UX Requirements Quality:** `ux.md` - -Sample items (testing the requirements, NOT the implementation): -- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]" -- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]" -- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]" -- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]" -- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]" -- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]" - -**API Requirements Quality:** `api.md` - -Sample items: -- "Are error response formats specified for all failure scenarios? [Completeness]" -- "Are rate limiting requirements quantified with specific thresholds? [Clarity]" -- "Are authentication requirements consistent across all endpoints? [Consistency]" -- "Are retry/timeout requirements defined for external dependencies? 
[Coverage, Gap]" -- "Is versioning strategy documented in requirements? [Gap]" - -**Performance Requirements Quality:** `performance.md` - -Sample items: -- "Are performance requirements quantified with specific metrics? [Clarity]" -- "Are performance targets defined for all critical user journeys? [Coverage]" -- "Are performance requirements under different load conditions specified? [Completeness]" -- "Can performance requirements be objectively measured? [Measurability]" -- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]" - -**Security Requirements Quality:** `security.md` - -Sample items: -- "Are authentication requirements specified for all protected resources? [Coverage]" -- "Are data protection requirements defined for sensitive information? [Completeness]" -- "Is the threat model documented and requirements aligned to it? [Traceability]" -- "Are security requirements consistent with compliance obligations? [Consistency]" -- "Are security failure/breach response requirements defined? [Gap, Exception Flow]" - -## Anti-Examples: What NOT To Do - -**❌ WRONG - These test implementation, not requirements:** - -```markdown -- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001] -- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003] -- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010] -- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005] -``` - -**✅ CORRECT - These test requirements quality:** - -```markdown -- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001] -- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003] -- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010] -- [ ] CHK004 - Is the selection criteria for related episodes documented? 
[Gap, Spec §FR-005] -- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap] -- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001] -``` - -**Key Differences:** -- Wrong: Tests if the system works correctly -- Correct: Tests if the requirements are written correctly -- Wrong: Verification of behavior -- Correct: Validation of requirement quality -- Wrong: "Does it do X?" -- Correct: "Is X clearly specified?" diff --git a/.cursor/commands/spec-kitty.clarify.md b/.cursor/commands/spec-kitty.clarify.md deleted file mode 100644 index 6cc7b09ae5..0000000000 --- a/.cursor/commands/spec-kitty.clarify.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec. ---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Outline - -Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file. - -Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/spec-kitty.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases. - -Execution steps: - -1. Run `spec-kitty agent feature check-prerequisites --json --paths-only` from the repository root and parse JSON for: - - `FEATURE_DIR` - Absolute path to feature directory (e.g., `/path/to/kitty-specs/017-my-feature/`) - - `FEATURE_SPEC` - Absolute path to spec.md file - - If command fails or JSON parsing fails, abort and instruct user to run `/spec-kitty.specify` first or verify they are in a spec-kitty-initialized repository. - -2. Load the current spec file. 
Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked). - - Functional Scope & Behavior: - - Core user goals & success criteria - - Explicit out-of-scope declarations - - User roles / personas differentiation - - Domain & Data Model: - - Entities, attributes, relationships - - Identity & uniqueness rules - - Lifecycle/state transitions - - Data volume / scale assumptions - - Interaction & UX Flow: - - Critical user journeys / sequences - - Error/empty/loading states - - Accessibility or localization notes - - Non-Functional Quality Attributes: - - Performance (latency, throughput targets) - - Scalability (horizontal/vertical, limits) - - Reliability & availability (uptime, recovery expectations) - - Observability (logging, metrics, tracing signals) - - Security & privacy (authN/Z, data protection, threat assumptions) - - Compliance / regulatory constraints (if any) - - Integration & External Dependencies: - - External services/APIs and failure modes - - Data import/export formats - - Protocol/versioning assumptions - - Edge Cases & Failure Handling: - - Negative scenarios - - Rate limiting / throttling - - Conflict resolution (e.g., concurrent edits) - - Constraints & Tradeoffs: - - Technical constraints (language, storage, hosting) - - Explicit tradeoffs or rejected alternatives - - Terminology & Consistency: - - Canonical glossary terms - - Avoided synonyms / deprecated terms - - Completion Signals: - - Acceptance criteria testability - - Measurable Definition of Done style indicators - - Misc / Placeholders: - - TODO markers / unresolved decisions - - Ambiguous adjectives ("robust", "intuitive") lacking quantification - - For each category with Partial or Missing status, add a candidate question opportunity unless: - - Clarification would not materially change implementation or 
validation strategy - - Information is better deferred to planning phase (note internally) - -3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints: - - Maximum of 10 total questions across the whole session. - - Each question must be answerable with EITHER: - * A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR - * A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words"). - - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation. - - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved. - - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness). - - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests. - - Scale thoroughness to the feature’s complexity: a lightweight enhancement may only need one or two confirmations, while multi-system efforts warrant the full question budget if gaps remain critical. - - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic. - -4. Sequential questioning loop (interactive): - - Present EXACTLY ONE question at a time. - - For multiple-choice questions, list options inline using letter prefixes rather than tables, e.g. - `Options: (A) describe option A · (B) describe option B · (C) describe option C · (D) short custom answer (<=5 words)` - Ask the user to reply with the letter (or short custom text when offered). - - For short-answer style (no meaningful discrete options), output a single line after the question: `Format: Short answer (<=5 words)`. 
- - After the user answers: - * Validate the answer maps to one option or fits the <=5 word constraint. - * If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance). - * Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question. - - Stop asking further questions when: - * All critical ambiguities resolved early (remaining queued items become unnecessary), OR - * User signals completion ("done", "good", "no more"), OR - * You reach 5 asked questions. - - Never reveal future queued questions in advance. - - If no valid questions exist at start, immediately report no critical ambiguities. - -5. Integration after EACH accepted answer (incremental update approach): - - Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents. - - For the first integrated answer in this session: - * Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing). - * Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today. - - Append a bullet line immediately after acceptance: `- Q: → A: `. - - Then immediately apply the clarification to the most appropriate section(s): - * Functional ambiguity → Update or add a bullet in Functional Requirements. - * User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario. - * Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly. - * Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target). - * Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it). 
- * Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once. - - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text. - - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite). - - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact. - - Keep each inserted clarification minimal and testable (avoid narrative drift). - -6. Validation (performed after EACH write plus final pass): - - Clarifications session contains exactly one bullet per accepted answer (no duplicates). - - Total asked (accepted) questions ≤ 5. - - Updated sections contain no lingering vague placeholders the new answer was meant to resolve. - - No contradictory earlier statement remains (scan for now-invalid alternative choices removed). - - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`. - - Terminology consistency: same canonical term used across all updated sections. - -7. Write the updated spec back to `FEATURE_SPEC`. - -8. Report completion (after questioning loop ends or early termination): - - Number of questions asked & answered. - - Path to updated spec. - - Sections touched (list names). - - Coverage summary listing each taxonomy category with a status label (Resolved / Deferred / Clear / Outstanding). Present as plain text or bullet list, not a table. - - If any Outstanding or Deferred remain, recommend whether to proceed to `/spec-kitty.plan` or run `/spec-kitty.clarify` again later post-plan. - - Suggested next command. - -Behavior rules: -- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding. 
-- If spec file missing, instruct user to run `/spec-kitty.specify` first (do not create a new spec here). -- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions). -- Avoid speculative tech stack questions unless the absence blocks functional clarity. -- Respect user early termination signals ("stop", "done", "proceed"). - - If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing. - - If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale. - -Context for prioritization: User arguments from $ARGUMENTS section above (if provided). Use these to focus clarification on specific areas of concern mentioned by the user. diff --git a/.cursor/commands/spec-kitty.constitution.md b/.cursor/commands/spec-kitty.constitution.md deleted file mode 100644 index 6c79509b73..0000000000 --- a/.cursor/commands/spec-kitty.constitution.md +++ /dev/null @@ -1,433 +0,0 @@ ---- -description: Create or update the project constitution through interactive phase-based discovery. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -*Path: [.kittify/templates/commands/constitution.md](.kittify/templates/commands/constitution.md)* - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - ---- - -## What This Command Does - -This command creates or updates the **project constitution** through an interactive, phase-based discovery workflow. - -**Location**: `.kittify/memory/constitution.md` (project root, not worktrees) -**Scope**: Project-wide principles that apply to ALL features - -**Important**: The constitution is OPTIONAL. All spec-kitty commands work without it. 
- -**Constitution Purpose**: -- Capture technical standards (languages, testing, deployment) -- Document code quality expectations (review process, quality gates) -- Record tribal knowledge (team conventions, lessons learned) -- Define governance (how the constitution changes, who enforces it) - ---- - -## Discovery Workflow - -This command uses a **4-phase discovery process**: - -1. **Phase 1: Technical Standards** (Recommended) - - Languages, frameworks, testing requirements - - Performance targets, deployment constraints - - ≈3-4 questions, creates a lean foundation - -2. **Phase 2: Code Quality** (Optional) - - PR requirements, review checklist, quality gates - - Documentation standards - - ≈3-4 questions - -3. **Phase 3: Tribal Knowledge** (Optional) - - Team conventions, lessons learned - - Historical decisions (optional) - - ≈2-4 questions - -4. **Phase 4: Governance** (Optional) - - Amendment process, compliance validation - - Exception handling (optional) - - ≈2-3 questions - -**Paths**: -- **Minimal** (≈1 page): Phase 1 only → ≈3-5 questions -- **Comprehensive** (≈2-3 pages): All phases → ≈8-12 questions - ---- - -## Execution Outline - -### Step 1: Initial Choice - -Ask the user: -``` -Do you want to establish a project constitution? - -A) No, skip it - I don't need a formal constitution -B) Yes, minimal - Core technical standards only (≈1 page, 3-5 questions) -C) Yes, comprehensive - Full governance and tribal knowledge (≈2-3 pages, 8-12 questions) -``` - -Handle responses: -- **A (Skip)**: Create a minimal placeholder at `.kittify/memory/constitution.md`: - - Title + short note: "Constitution skipped - not required for spec-kitty usage. Run /spec-kitty.constitution anytime to create one." - - Exit successfully. -- **B (Minimal)**: Continue with Phase 1 only. -- **C (Comprehensive)**: Continue through all phases, asking whether to skip each optional phase. 
- -### Step 2: Phase 1 - Technical Standards - -Context: -``` -Phase 1: Technical Standards -These are the non-negotiable technical requirements that all features must follow. -This phase is recommended for all projects. -``` - -Ask one question at a time: - -**Q1: Languages and Frameworks** -``` -What languages and frameworks are required for this project? -Examples: -- "Python 3.11+ with FastAPI for backend" -- "TypeScript 4.9+ with React 18 for frontend" -- "Rust 1.70+ with no external dependencies" -``` - -**Q2: Testing Requirements** -``` -What testing framework and coverage requirements? -Examples: -- "pytest with 80% line coverage, 100% for critical paths" -- "Jest with 90% coverage, unit + integration tests required" -- "cargo test, no specific coverage target but all features must have tests" -``` - -**Q3: Performance and Scale Targets** -``` -What are the performance and scale expectations? -Examples: -- "Handle 1000 requests/second at p95 < 200ms" -- "Support 10k concurrent users, 1M daily active users" -- "CLI operations complete in < 2 seconds" -- "N/A - performance not a primary concern" -``` - -**Q4: Deployment and Constraints** -``` -What are the deployment constraints or platform requirements? -Examples: -- "Docker-only, deployed to Kubernetes" -- "Must run on Ubuntu 20.04 LTS without external dependencies" -- "Cross-platform: Linux, macOS, Windows 10+" -- "N/A - no specific deployment constraints" -``` - -### Step 3: Phase 2 - Code Quality (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 2: Code Quality -Skip this if your team uses standard practices without special requirements. - -Do you want to define code quality standards? -A) Yes, ask questions -B) No, skip this phase (use standard practices) -``` - -If yes, ask one at a time: - -**Q5: PR Requirements** -``` -What are the requirements for pull requests? 
-Examples: -- "2 approvals required, 1 must be from core team" -- "1 approval required, PR must pass CI checks" -- "Self-merge allowed after CI passes for maintainers" -``` - -**Q6: Code Review Checklist** -``` -What should reviewers check during code review? -Examples: -- "Tests added, docstrings updated, follows PEP 8, no security issues" -- "Type annotations present, error handling robust, performance considered" -- "Standard review - correctness, clarity, maintainability" -``` - -**Q7: Quality Gates** -``` -What quality gates must pass before merging? -Examples: -- "All tests pass, coverage ≥80%, linter clean, security scan clean" -- "Tests pass, type checking passes, manual QA approved" -- "CI green, no merge conflicts, PR approved" -``` - -**Q8: Documentation Standards** -``` -What documentation is required? -Examples: -- "All public APIs must have docstrings + examples" -- "README updated for new features, ADRs for architectural decisions" -- "Inline comments for complex logic, keep docs up to date" -- "Minimal - code should be self-documenting" -``` - -### Step 4: Phase 3 - Tribal Knowledge (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 3: Tribal Knowledge -Skip this for new projects or if team conventions are minimal. - -Do you want to capture tribal knowledge? -A) Yes, ask questions -B) No, skip this phase -``` - -If yes, ask: - -**Q9: Team Conventions** -``` -What team conventions or coding styles should everyone follow? -Examples: -- "Use Result for fallible operations, never unwrap() in prod" -- "Prefer composition over inheritance, keep classes small (<200 lines)" -- "Use feature flags for gradual rollouts, never merge half-finished features" -``` - -**Q10: Lessons Learned** -``` -What past mistakes or lessons learned should guide future work? 
-Examples: -- "Always version APIs from day 1" -- "Write integration tests first" -- "Keep dependencies minimal - every dependency is a liability" -- "N/A - no major lessons yet" -``` - -Optional follow-up: -``` -Do you want to document historical architectural decisions? -A) Yes -B) No -``` - -**Q11: Historical Decisions** (only if yes) -``` -Any historical architectural decisions that should guide future work? -Examples: -- "Chose microservices for independent scaling" -- "Chose monorepo for atomic changes across services" -- "Chose SQLite for simplicity over PostgreSQL" -``` - -### Step 5: Phase 4 - Governance (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 4: Governance -Skip this to use simple defaults. - -Do you want to define governance process? -A) Yes, ask questions -B) No, skip this phase (use simple defaults) -``` - -If skipped, use defaults: -- Amendment: Any team member can propose changes via PR -- Compliance: Team validates during code review -- Exceptions: Discuss with team, document in PR - -If yes, ask: - -**Q12: Amendment Process** -``` -How should the constitution be amended? -Examples: -- "PR with 2 approvals, announce in team chat, 1 week discussion" -- "Any maintainer can update via PR" -- "Quarterly review, team votes on changes" -``` - -**Q13: Compliance Validation** -``` -Who validates that features comply with the constitution? -Examples: -- "Code reviewers check compliance, block merge if violated" -- "Team lead reviews architecture" -- "Self-managed - developers responsible" -``` - -Optional follow-up: -``` -Do you want to define exception handling? -A) Yes -B) No -``` - -**Q14: Exception Handling** (only if yes) -``` -How should exceptions to the constitution be handled? 
-Examples: -- "Document in ADR, require 3 approvals, set sunset date" -- "Case-by-case discussion, strong justification required" -- "Exceptions discouraged - update constitution instead" -``` - -### Step 6: Summary and Confirmation - -Present a summary and ask for confirmation: -``` -Constitution Summary -==================== - -You've completed [X] phases and answered [Y] questions. -Here's what will be written to .kittify/memory/constitution.md: - -Technical Standards: -- Languages: [Q1] -- Testing: [Q2] -- Performance: [Q3] -- Deployment: [Q4] - -[If Phase 2 completed] -Code Quality: -- PR Requirements: [Q5] -- Review Checklist: [Q6] -- Quality Gates: [Q7] -- Documentation: [Q8] - -[If Phase 3 completed] -Tribal Knowledge: -- Conventions: [Q9] -- Lessons Learned: [Q10] -- Historical Decisions: [Q11 if present] - -Governance: [Custom if Phase 4 completed, otherwise defaults] - -Estimated length: ≈[50-80 lines minimal] or ≈[150-200 lines comprehensive] - -Proceed with writing constitution? -A) Yes, write it -B) No, let me start over -C) Cancel, don't create constitution -``` - -Handle responses: -- **A**: Write the constitution file. -- **B**: Restart from Step 1. -- **C**: Exit without writing. - -### Step 7: Write Constitution File - -Generate the constitution as Markdown: - -```markdown -# [PROJECT_NAME] Constitution - -> Auto-generated by spec-kitty constitution command -> Created: [YYYY-MM-DD] -> Version: 1.0.0 - -## Purpose - -This constitution captures the technical standards, code quality expectations, -tribal knowledge, and governance rules for [PROJECT_NAME]. All features and -pull requests should align with these principles. 
- -## Technical Standards - -### Languages and Frameworks -[Q1] - -### Testing Requirements -[Q2] - -### Performance and Scale -[Q3] - -### Deployment and Constraints -[Q4] - -[If Phase 2 completed] -## Code Quality - -### Pull Request Requirements -[Q5] - -### Code Review Checklist -[Q6] - -### Quality Gates -[Q7] - -### Documentation Standards -[Q8] - -[If Phase 3 completed] -## Tribal Knowledge - -### Team Conventions -[Q9] - -### Lessons Learned -[Q10] - -[If Q11 present] -### Historical Decisions -[Q11] - -## Governance - -[If Phase 4 completed] -### Amendment Process -[Q12] - -### Compliance Validation -[Q13] - -[If Q14 present] -### Exception Handling -[Q14] - -[If Phase 4 skipped, use defaults] -### Amendment Process -Any team member can propose amendments via pull request. Changes are discussed -and merged following standard PR review process. - -### Compliance Validation -Code reviewers validate compliance during PR review. Constitution violations -should be flagged and addressed before merge. - -### Exception Handling -Exceptions discussed case-by-case with team. Strong justification required. -Consider updating constitution if exceptions become common. -``` - -### Step 8: Success Message - -After writing, provide: -- Location of the file -- Phases completed and questions answered -- Next steps (review, share with team, run /spec-kitty.specify) - ---- - -## Required Behaviors - -- Ask one question at a time. -- Offer skip options and explain when to skip. -- Keep responses concise and user-focused. -- Ensure the constitution stays lean (1-3 pages, not 10 pages). -- If user chooses to skip entirely, still create the minimal placeholder file and exit successfully. diff --git a/.cursor/commands/spec-kitty.dashboard.md b/.cursor/commands/spec-kitty.dashboard.md deleted file mode 100644 index af4eff346a..0000000000 --- a/.cursor/commands/spec-kitty.dashboard.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Open the Spec Kitty dashboard in your browser. 
---- - - -## Dashboard Access - -This command launches the Spec Kitty dashboard in your browser using the spec-kitty CLI. - -## What to do - -Simply run the `spec-kitty dashboard` command to: -- Start the dashboard if it's not already running -- Open it in your default web browser -- Display the dashboard URL - -If you need to stop the dashboard, you can use `spec-kitty dashboard --kill`. - -## Implementation - -Execute the following terminal command: - -```bash -spec-kitty dashboard -``` - -## Additional Options - -- To specify a preferred port: `spec-kitty dashboard --port 8080` -- To stop the dashboard: `spec-kitty dashboard --kill` - -## Success Criteria - -- User sees the dashboard URL clearly displayed -- Browser opens automatically to the dashboard -- If browser doesn't open, user gets clear instructions -- Error messages are helpful and actionable diff --git a/.cursor/commands/spec-kitty.implement.md b/.cursor/commands/spec-kitty.implement.md deleted file mode 100644 index cf59f9e163..0000000000 --- a/.cursor/commands/spec-kitty.implement.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: Create an isolated workspace (worktree) for implementing a specific work package. ---- - - -## ⚠️ CRITICAL: Working Directory Requirement - -**After running `spec-kitty implement WP##`, you MUST:** - -1. **Run the cd command shown in the output** - e.g., `cd .worktrees/###-feature-WP##/` -2. **ALL file operations happen in this directory** - Read, Write, Edit tools must target files in the workspace -3. **NEVER write deliverable files to the main repository** - This is a critical workflow error - -**Why this matters:** -- Each WP has an isolated worktree with its own branch -- Changes in main repository will NOT be seen by reviewers looking at the WP worktree -- Writing to main instead of the workspace causes review failures and merge conflicts - ---- - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). 
- -**You MUST scroll to the BOTTOM** to see the completion command! - -Run this command to get the work package prompt and implementation instructions: - -```bash -spec-kitty agent workflow implement $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is implementing! - -If no WP ID is provided, it will automatically find the first work package with `lane: "planned"` and move it to "doing" for you. - ---- - -## Commit Workflow - -**BEFORE moving to for_review**, you MUST commit your implementation: - -```bash -cd .worktrees/###-feature-WP##/ -git add -A -git commit -m "feat(WP##): " -``` - -**Then move to review:** -```bash -spec-kitty agent tasks move-task WP## --to for_review --note "Ready for review: " -``` - -**Why this matters:** -- `move-task` validates that your worktree has commits beyond main -- Uncommitted changes will block the move to for_review -- This prevents lost work and ensures reviewers see complete implementations - ---- - -**The Python script handles all file updates automatically - no manual editing required!** - -**NOTE**: If `/spec-kitty.status` shows your WP in "doing" after you moved it to "for_review", don't panic - a reviewer may have moved it back (changes requested), or there's a sync delay. Focus on your WP. diff --git a/.cursor/commands/spec-kitty.merge.md b/.cursor/commands/spec-kitty.merge.md deleted file mode 100644 index 9f739a89b4..0000000000 --- a/.cursor/commands/spec-kitty.merge.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -description: Merge a completed feature into the main branch and clean up worktree ---- - - -# /spec-kitty.merge - Merge Feature to Main - -**Version**: 0.11.0+ -**Purpose**: Merge ALL completed work packages for a feature into main branch. 
- -## CRITICAL: Workspace-per-WP Model (0.11.0) - -In 0.11.0, each work package has its own worktree: -- `.worktrees/###-feature-WP01/` -- `.worktrees/###-feature-WP02/` -- `.worktrees/###-feature-WP03/` - -**Merge merges ALL WP branches at once** (not incrementally one-by-one). - -## ⛔ Location Pre-flight Check (CRITICAL) - -**BEFORE PROCEEDING:** You MUST be in a feature worktree, NOT the main repository. - -Verify your current location: -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/###-feature-name-WP01` (or similar feature worktree) -- Branch: Should show your feature branch name like `###-feature-name-WP01` (NOT `main` or `release/*`) - -**If you see:** -- Branch showing `main` or `release/` -- OR pwd shows the main repository root - -⛔ **STOP - DANGER! You are in the wrong location!** - -**Correct the issue:** -1. Navigate to ANY worktree for this feature: `cd .worktrees/###-feature-name-WP01` -2. Verify you're on a feature branch: `git branch --show-current` -3. 
Then run this merge command again - -**Exception (main branch):** -If you are on `main` and need to merge a workspace-per-WP feature, run: -```bash -spec-kitty merge --feature -``` - ---- - -## Location Pre-flight Check (CRITICAL for AI Agents) - -Before merging, verify you are in the correct working directory by running this validation: - -```bash -python3 -c " -from specify_cli.guards import validate_worktree_location -result = validate_worktree_location() -if not result.is_valid: - print(result.format_error()) - print('\nThis command MUST run from a feature worktree, not the main repository.') - print('\nFor workspace-per-WP features, run from ANY WP worktree:') - print(' cd /path/to/project/.worktrees/-WP01') - print(' # or any other WP worktree for this feature') - raise SystemExit(1) -else: - print('✓ Location verified:', result.branch_name) -" -``` - -**What this validates**: -- Current branch follows the feature pattern like `001-feature-name` or `001-feature-name-WP01` -- You're not attempting to run from `main` or any release branch -- The validator prints clear navigation instructions if you're outside the feature worktree - -**For workspace-per-WP features (0.11.0+)**: -- Run merge from ANY WP worktree (e.g., `.worktrees/014-feature-WP09/`) -- The merge command automatically detects all WP branches and merges them sequentially -- You do NOT need to run merge from each WP worktree individually - -## Prerequisites - -Before running this command: - -1. ✅ All work packages must be in `done` lane (reviewed and approved) -2. ✅ Feature must pass `/spec-kitty.accept` checks -3. ✅ Working directory must be clean (no uncommitted changes in main) -4. ✅ **You must be in main repository root** (not in a worktree) - -## Command Syntax - -```bash -spec-kitty merge ###-feature-slug [OPTIONS] -``` - -**Example**: -```bash -cd /tmp/spec-kitty-test/test-project # Main repo root -spec-kitty merge 001-cli-hello-world -``` - -## What This Command Does - -1. 
**Detects** your current feature branch and worktree status -2. **Runs** pre-flight validation across all worktrees and the target branch -3. **Determines** merge order based on WP dependencies (workspace-per-WP) -4. **Forecasts** conflicts during `--dry-run` and flags auto-resolvable status files -5. **Verifies** working directory is clean (legacy single-worktree) -6. **Switches** to the target branch (default: `main`) -7. **Updates** the target branch (`git pull --ff-only`) -8. **Merges** the feature using your chosen strategy -9. **Auto-resolves** status file conflicts after each WP merge -10. **Optionally pushes** to origin -11. **Removes** the feature worktree (if in one) -12. **Deletes** the feature branch - -## Usage - -### Basic merge (default: merge commit, cleanup everything) - -```bash -spec-kitty merge -``` - -This will: -- Create a merge commit -- Remove the worktree -- Delete the feature branch -- Keep changes local (no push) - -### Merge with options - -```bash -# Squash all commits into one -spec-kitty merge --strategy squash - -# Push to origin after merging -spec-kitty merge --push - -# Keep the feature branch -spec-kitty merge --keep-branch - -# Keep the worktree -spec-kitty merge --keep-worktree - -# Merge into a different branch -spec-kitty merge --target develop - -# See what would happen without doing it -spec-kitty merge --dry-run - -# Run merge from main for a workspace-per-WP feature -spec-kitty merge --feature 017-feature-slug -``` - -### Common workflows - -```bash -# Feature complete, squash and push -spec-kitty merge --strategy squash --push - -# Keep branch for reference -spec-kitty merge --keep-branch - -# Merge into develop instead of main -spec-kitty merge --target develop --push -``` - -## Merge Strategies - -### `merge` (default) -Creates a merge commit preserving all feature branch commits. 
-```bash -spec-kitty merge --strategy merge -``` -✅ Preserves full commit history -✅ Clear feature boundaries in git log -❌ More commits in main branch - -### `squash` -Squashes all feature commits into a single commit. -```bash -spec-kitty merge --strategy squash -``` -✅ Clean, linear history on main -✅ Single commit per feature -❌ Loses individual commit details - -### `rebase` -Requires manual rebase first (command will guide you). -```bash -spec-kitty merge --strategy rebase -``` -✅ Linear history without merge commits -❌ Requires manual intervention -❌ Rewrites commit history - -## Options - -| Option | Description | Default | -|--------|-------------|---------| -| `--strategy` | Merge strategy: `merge`, `squash`, or `rebase` | `merge` | -| `--delete-branch` / `--keep-branch` | Delete feature branch after merge | delete | -| `--remove-worktree` / `--keep-worktree` | Remove feature worktree after merge | remove | -| `--push` | Push to origin after merge | no push | -| `--target` | Target branch to merge into | `main` | -| `--dry-run` | Show what would be done without executing | off | -| `--feature` | Feature slug when merging from main branch | none | -| `--resume` | Resume an interrupted merge | off | - -## Worktree Strategy - -Spec Kitty uses an **opinionated worktree approach**: - -### Workspace-per-WP Model (0.11.0+) - -In the current model, each work package gets its own worktree: - -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system-WP01/ # WP01 worktree -│ ├── 001-auth-system-WP02/ # WP02 worktree -│ ├── 001-auth-system-WP03/ # WP03 worktree -│ └── 002-dashboard-WP01/ # Different feature -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -**Merge behavior for workspace-per-WP**: -- Run `spec-kitty merge` from **any** WP worktree for the feature -- The command automatically detects all WP branches (WP01, WP02, WP03, etc.) 
-- Merges each WP branch into main in sequence -- Cleans up all WP worktrees and branches - -### Legacy Pattern (0.10.x) -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system/ # Feature 1 worktree (single) -│ ├── 002-dashboard/ # Feature 2 worktree (single) -│ └── 003-notifications/ # Feature 3 worktree (single) -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -### The Rules -1. **Main branch** stays in the primary repo root -2. **Feature branches** live in `.worktrees//` -3. **Work on features** happens in their worktrees (isolation) -4. **Merge from worktrees** using this command -5. **Cleanup is automatic** - worktrees removed after merge - -### Why Worktrees? -- ✅ Work on multiple features simultaneously -- ✅ Each feature has its own sandbox -- ✅ No branch switching in main repo -- ✅ Easy to compare features -- ✅ Clean separation of concerns - -### The Flow -``` -1. /spec-kitty.specify → Creates branch + worktree -2. cd .worktrees// → Enter worktree -3. /spec-kitty.plan → Work in isolation -4. /spec-kitty.tasks -5. /spec-kitty.implement -6. /spec-kitty.review -7. /spec-kitty.accept -8. /spec-kitty.merge → Merge + cleanup worktree -9. Back in main repo! → Ready for next feature -``` - -## Error Handling - -### "Already on main branch" -You're not on a feature branch. Switch to your feature branch first: -```bash -cd .worktrees/ -# or -git checkout -``` - -### "Working directory has uncommitted changes" -Commit or stash your changes: -```bash -git add . -git commit -m "Final changes" -# or -git stash -``` - -### "Could not fast-forward main" -Your main branch is behind origin: -```bash -git checkout main -git pull -git checkout -spec-kitty merge -``` - -### "Merge failed - conflicts" -Resolve conflicts manually: -```bash -# Fix conflicts in files -git add -git commit -# Then complete cleanup manually: -git worktree remove .worktrees/ -git branch -d -``` - -## Safety Features - -1. 
**Clean working directory check** - Won't merge with uncommitted changes -2. **Fast-forward only pull** - Won't proceed if main has diverged -3. **Graceful failure** - If merge fails, you can fix manually -4. **Optional operations** - Push, branch delete, and worktree removal are configurable -5. **Dry run mode** - Preview exactly what will happen - -## Examples - -### Complete feature and push -```bash -cd .worktrees/001-auth-system -/spec-kitty.accept -/spec-kitty.merge --push -``` - -### Squash merge for cleaner history -```bash -spec-kitty merge --strategy squash --push -``` - -### Merge but keep branch for reference -```bash -spec-kitty merge --keep-branch --push -``` - -### Check what will happen first -```bash -spec-kitty merge --dry-run -``` - -## After Merging - -After a successful merge, you're back on the main branch with: -- ✅ Feature code integrated -- ✅ Worktree removed (if it existed) -- ✅ Feature branch deleted (unless `--keep-branch`) -- ✅ Ready to start your next feature! - -## Integration with Accept - -The typical flow is: - -```bash -# 1. Run acceptance checks -/spec-kitty.accept --mode local - -# 2. If checks pass, merge -/spec-kitty.merge --push -``` - -Or combine conceptually: -```bash -# Accept verifies readiness -/spec-kitty.accept --mode local - -# Merge performs integration -/spec-kitty.merge --strategy squash --push -``` - -The `/spec-kitty.accept` command **verifies** your feature is complete. -The `/spec-kitty.merge` command **integrates** your feature into main. - -Together they complete the workflow: -``` -specify → plan → tasks → implement → review → accept → merge ✅ -``` diff --git a/.cursor/commands/spec-kitty.plan.md b/.cursor/commands/spec-kitty.plan.md deleted file mode 100644 index 36e2de1874..0000000000 --- a/.cursor/commands/spec-kitty.plan.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -description: Execute the implementation planning workflow using the plan template to generate design artifacts. 
---- - - -# /spec-kitty.plan - Create Implementation Plan - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Plan works in the planning repository. NO worktrees created. - -```bash -# Run from project root (same directory as /spec-kitty.specify): -# You should already be here if you just ran /spec-kitty.specify - -# Creates: -# - kitty-specs/###-feature/plan.md → In planning repository -# - Commits to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -This command runs in the **planning repository**, not in a worktree. - -- Verify you're on the target branch (meta.json → target_branch) before scaffolding plan.md -- Planning artifacts live in `kitty-specs/###-feature/` -- The plan template is committed to the target branch after generation - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -## Planning Interrogation (mandatory) - -Before executing any scripts or generating artifacts you must interrogate the specification and stakeholders. 
- -- **Scope proportionality (CRITICAL)**: FIRST, assess the feature's complexity from the spec: - - **Trivial/Test Features** (hello world, simple static pages, basic demos): Ask 1-2 questions maximum about tech stack preference, then proceed with sensible defaults - - **Simple Features** (small components, minor API additions): Ask 2-3 questions about tech choices and constraints - - **Complex Features** (new subsystems, multi-component features): Ask 3-5 questions covering architecture, NFRs, integrations - - **Platform/Critical Features** (core infrastructure, security, payments): Full interrogation with 5+ questions - -- **User signals to reduce questioning**: If the user says "use defaults", "just make it simple", "skip to implementation", "vanilla HTML/CSS/JS" - recognize these as signals to minimize planning questions and use standard approaches. - -- **First response rule**: - - For TRIVIAL features: Ask ONE tech stack question, then if answer is simple (e.g., "vanilla HTML"), proceed directly to plan generation - - For other features: Ask a single architecture question and end with `WAITING_FOR_PLANNING_INPUT` - -- If the user has not provided plan context, keep interrogating with one question at a time. - -- **Conversational cadence**: After each reply, assess if you have SUFFICIENT context for this feature's scope. For trivial features, knowing the basic stack is enough. Only continue if critical unknowns remain. - -Planning requirements (scale to complexity): - -1. Maintain a **Planning Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for platform-level). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, standard practices are acceptable (vanilla HTML, simple file structure, no build tools). Only probe if the user's request suggests otherwise. -3. 
When you have sufficient context for the scope, summarize into an **Engineering Alignment** note and confirm. -4. If user explicitly asks to skip questions or use defaults, acknowledge and proceed with best practices for that feature type. - -## Outline - -1. **Check planning discovery status**: - - If any planning questions remain unanswered or the user has not confirmed the **Engineering Alignment** summary, stay in the one-question cadence, capture the user's response, update your internal table, and end with `WAITING_FOR_PLANNING_INPUT`. Do **not** surface the table. Do **not** run the setup command yet. - - Once every planning question has a concrete answer and the alignment summary is confirmed by the user, continue. - -2. **Detect feature context** (CRITICAL - prevents wrong feature selection): - - Before running any commands, detect which feature you're working on: - - a. **Check git branch name**: - - Run: `git rev-parse --abbrev-ref HEAD` - - If branch matches pattern `###-feature-name` or `###-feature-name-WP##`, extract the feature slug (strip `-WP##` suffix if present) - - Example: Branch `020-my-feature` or `020-my-feature-WP01` → Feature `020-my-feature` - - b. **Check current directory**: - - Look for `###-feature-name` pattern in the current path - - Examples: - - Inside `kitty-specs/020-my-feature/` → Feature `020-my-feature` - - Not in a worktree during planning (worktrees only used during implement): If detection runs from `.worktrees/020-my-feature-WP01/` → Feature `020-my-feature` - - c. **Prioritize features without plan.md** (if multiple exist): - - If multiple features exist and none detected from branch/path, list all features in `kitty-specs/` - - Prefer features that don't have `plan.md` yet (unplanned features) - - If ambiguous, ask the user which feature to plan - - d. 
**Extract feature slug**: - - Feature slug format: `###-feature-name` (e.g., `020-my-feature`) - - You MUST pass this explicitly to the setup-plan command using `--feature` flag - - **DO NOT** rely on auto-detection by the CLI (prevents wrong feature selection) - -3. **Setup**: Run `spec-kitty agent feature setup-plan --feature --json` from the repository root and parse JSON for: - - `result`: "success" or error message - - `plan_file`: Absolute path to the created plan.md - - `feature_dir`: Absolute path to the feature directory - - **Example**: - ```bash - # If detected feature is 020-my-feature: - spec-kitty agent feature setup-plan --feature 020-my-feature --json - ``` - - **Error handling**: If the command fails with "Cannot detect feature" or "Multiple features found", verify your feature detection logic in step 2 and ensure you're passing the correct feature slug. - -4. **Load context**: Read FEATURE_SPEC and `.kittify/memory/constitution.md` if it exists. If the constitution file is missing, skip Constitution Check and note that it is absent. Load IMPL_PLAN template (already copied). - -5. **Execute plan workflow**: Follow the structure in IMPL_PLAN template, using the validated planning answers as ground truth: - - Update Technical Context with explicit statements from the user or discovery research; mark `[NEEDS CLARIFICATION: …]` only when the user deliberately postpones a decision - - If a constitution exists, fill Constitution Check section from it and challenge any conflicts directly with the user. If no constitution exists, mark the section as skipped. 
- - Evaluate gates (ERROR if violations unjustified or questions remain unanswered) - - Phase 0: Generate research.md (commission research to resolve every outstanding clarification) - - Phase 1: Generate data-model.md, contracts/, quickstart.md based on confirmed intent - - Phase 1: Update agent context by running the agent script - - Re-evaluate Constitution Check post-design, asking the user to resolve new gaps before proceeding - -6. **STOP and report**: This command ends after Phase 1 planning. Report branch, IMPL_PLAN path, and generated artifacts. - - **⚠️ CRITICAL: DO NOT proceed to task generation!** The user must explicitly run `/spec-kitty.tasks` to generate work packages. Your job is COMPLETE after reporting the planning artifacts. - -## Phases - -### Phase 0: Outline & Research - -1. **Extract unknowns from Technical Context** above: - - For each NEEDS CLARIFICATION → research task - - For each dependency → best practices task - - For each integration → patterns task - -2. **Generate and dispatch research agents**: - ``` - For each unknown in Technical Context: - Task: "Research {unknown} for {feature context}" - For each technology choice: - Task: "Find best practices for {tech} in {domain}" - ``` - -3. **Consolidate findings** in `research.md` using format: - - Decision: [what was chosen] - - Rationale: [why chosen] - - Alternatives considered: [what else evaluated] - -**Output**: research.md with all NEEDS CLARIFICATION resolved - -### Phase 1: Design & Contracts - -**Prerequisites:** `research.md` complete - -1. **Extract entities from feature spec** → `data-model.md`: - - Entity name, fields, relationships - - Validation rules from requirements - - State transitions if applicable - -2. **Generate API contracts** from functional requirements: - - For each user action → endpoint - - Use standard REST/GraphQL patterns - - Output OpenAPI/GraphQL schema to `/contracts/` - -3. 
**Agent context update**: - - Run `` - - These scripts detect which AI agent is in use - - Update the appropriate agent-specific context file - - Add only new technology from current plan - - Preserve manual additions between markers - -**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file - -## Key rules - -- Use absolute paths -- ERROR on gate failures or unresolved clarifications - ---- - -## ⛔ MANDATORY STOP POINT - -**This command is COMPLETE after generating planning artifacts.** - -After reporting: -- `plan.md` path -- `research.md` path (if generated) -- `data-model.md` path (if generated) -- `contracts/` contents (if generated) -- Agent context file updated - -**YOU MUST STOP HERE.** - -Do NOT: -- ❌ Generate `tasks.md` -- ❌ Create work package (WP) files -- ❌ Create `tasks/` subdirectories -- ❌ Proceed to implementation - -The user will run `/spec-kitty.tasks` when they are ready to generate work packages. - -**Next suggested command**: `/spec-kitty.tasks` (user must invoke this explicitly) diff --git a/.cursor/commands/spec-kitty.research.md b/.cursor/commands/spec-kitty.research.md deleted file mode 100644 index b6bdff8ea7..0000000000 --- a/.cursor/commands/spec-kitty.research.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Run the Phase 0 research workflow to scaffold research artifacts before task planning. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - - -*Path: [.kittify/templates/commands/research.md](.kittify/templates/commands/research.md)* - - -## Location Pre-flight Check - -**BEFORE PROCEEDING:** Verify you are working in the feature worktree. 
- -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/001-feature-name` (or similar feature worktree) -- Branch: Should show your feature branch name like `001-feature-name` (NOT `main`) - -**If you see the main branch or main repository path:** - -⛔ **STOP - You are in the wrong location!** - -This command creates research artifacts in your feature directory. You must be in the feature worktree. - -**Correct the issue:** -1. Navigate to your feature worktree: `cd .worktrees/001-feature-name` -2. Verify you're on the correct feature branch: `git branch --show-current` -3. Then run this research command again - ---- - -## What This Command Creates - -When you run `spec-kitty research`, the following files are generated in your feature directory: - -**Generated files**: -- **research.md** – Decisions, rationale, and supporting evidence -- **data-model.md** – Entities, attributes, and relationships -- **research/evidence-log.csv** – Sources and findings audit trail -- **research/source-register.csv** – Reference tracking for all sources - -**Location**: All files go in `kitty-specs/001-feature-name/` - ---- - -## Workflow Context - -**Before this**: `/spec-kitty.plan` calls this as "Phase 0" research phase - -**This command**: -- Scaffolds research artifacts -- Creates templates for capturing decisions and evidence -- Establishes audit trail for traceability - -**After this**: -- Fill in research.md, data-model.md, and CSV logs with actual findings -- Continue with `/spec-kitty.plan` which uses your research to drive technical design - ---- - -## Goal - -Create `research.md`, `data-model.md`, and supporting CSV stubs based on the active mission so implementation planning can reference concrete decisions and evidence. - -## What to do - -1. You should already be in the correct feature worktree (verified above with pre-flight check). -2. Run `spec-kitty research` to generate the mission-specific research artifacts. 
(Add `--force` only when it is acceptable to overwrite existing drafts.) -3. Open the generated files and fill in the required content: - - `research.md` – capture decisions, rationale, and supporting evidence. - - `data-model.md` – document entities, attributes, and relationships discovered during research. - - `research/evidence-log.csv` & `research/source-register.csv` – log all sources and findings so downstream reviewers can audit the trail. -4. If your research generates additional templates (spreadsheets, notebooks, etc.), store them under `research/` and reference them inside `research.md`. -5. Summarize open questions or risks at the bottom of `research.md`. These should feed directly into `/spec-kitty.tasks` and future implementation prompts. - -## Success Criteria - -- `kitty-specs//research.md` explains every major decision with references to evidence. -- `kitty-specs//data-model.md` lists the entities and relationships needed for implementation. -- CSV logs exist (even if partially filled) so evidence gathering is traceable. -- Outstanding questions from the research phase are tracked and ready for follow-up during planning or execution. diff --git a/.cursor/commands/spec-kitty.review.md b/.cursor/commands/spec-kitty.review.md deleted file mode 100644 index fde47891fc..0000000000 --- a/.cursor/commands/spec-kitty.review.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Perform structured code review and kanban transitions for completed task prompt files ---- - - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). - -**You MUST scroll to the BOTTOM** to see the completion commands! - -Run this command to get the work package prompt and review instructions: - -```bash -spec-kitty agent workflow review $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is reviewing! 
- -If no WP ID is provided, it will automatically find the first work package with `lane: "for_review"` and move it to "doing" for you. - -## Dependency checks (required) - -- dependency_check: If the WP frontmatter lists `dependencies`, confirm each dependency WP is merged to main before you review this WP. -- dependent_check: Identify any WPs that list this WP as a dependency and note their current lanes. -- rebase_warning: If you request changes AND any dependents exist, warn those agents to rebase and provide a concrete command (example: `cd .worktrees/FEATURE-WP02 && git rebase FEATURE-WP01`). -- verify_instruction: Confirm dependency declarations match actual code coupling (imports, shared modules, API contracts). - -**After reviewing, scroll to the bottom and run ONE of these commands**: -- ✅ Approve: `spec-kitty agent tasks move-task WP## --to done --note "Review passed: "` -- ❌ Reject: Write feedback to the temp file path shown in the prompt, then run `spec-kitty agent tasks move-task WP## --to planned --review-feedback-file ` - -**The prompt will provide a unique temp file path for feedback - use that exact path to avoid conflicts with other agents!** - -**The Python script handles all file updates automatically - no manual editing required!** diff --git a/.cursor/commands/spec-kitty.specify.md b/.cursor/commands/spec-kitty.specify.md deleted file mode 100644 index cc2735849c..0000000000 --- a/.cursor/commands/spec-kitty.specify.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -description: Create or update the feature specification from a natural language feature description. ---- - - -# /spec-kitty.specify - Create Feature Specification - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Specify works in the planning repository. NO worktrees are created. 
- -```bash -# Run from project root: -cd /path/to/project/root # Your planning repository - -# All planning artifacts are created in the planning repo and committed: -# - kitty-specs/###-feature/spec.md → Created in planning repo -# - Committed to target branch (meta.json → target_branch) -# - NO worktrees created -``` - -**Worktrees are created later** during `/spec-kitty.implement`, not during planning. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery Gate (mandatory) - -Before running any scripts or writing to disk you **must** conduct a structured discovery interview. - -- **Scope proportionality (CRITICAL)**: FIRST, gauge the inherent complexity of the request: - - **Trivial/Test Features** (hello world, simple pages, proof-of-concept): Ask 1-2 questions maximum, then proceed. Examples: "a simple hello world page", "tic-tac-toe game", "basic contact form" - - **Simple Features** (small UI additions, minor enhancements): Ask 2-3 questions covering purpose and basic constraints - - **Complex Features** (new subsystems, integrations): Ask 3-5 questions covering goals, users, constraints, risks - - **Platform/Critical Features** (authentication, payments, infrastructure): Full discovery with 5+ questions - -- **User signals to reduce questioning**: If the user says "just testing", "quick prototype", "skip to next phase", "stop asking questions" - recognize this as a signal to minimize discovery and proceed with reasonable defaults. - -- **First response rule**: - - For TRIVIAL features (hello world, simple test): Ask ONE clarifying question, then if the answer confirms it's simple, proceed directly to spec generation - - For other features: Ask a single focused discovery question and end with `WAITING_FOR_DISCOVERY_INPUT` - -- If the user provides no initial description (empty command), stay in **Interactive Interview Mode**: keep probing with one question at a time. 
- -- **Conversational cadence**: After each user reply, decide if you have ENOUGH context for this feature's complexity level. For trivial features, 1-2 questions is sufficient. Only continue asking if truly necessary for the scope. - -Discovery requirements (scale to feature complexity): - -1. Maintain a **Discovery Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for complex). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, reasonable defaults are acceptable. Only probe if truly ambiguous. -3. When you have sufficient context for the feature's scope, paraphrase into an **Intent Summary** and confirm. For trivial features, this can be very brief. -4. If user explicitly asks to skip questions or says "just testing", acknowledge and proceed with minimal discovery. - -## Mission Selection - -After completing discovery and confirming the Intent Summary, determine the appropriate mission for this feature. - -### Available Missions - -- **software-dev**: For building software features, APIs, CLI tools, applications - - Phases: research → design → implement → test → review - - Best for: code changes, new features, bug fixes, refactoring - -- **research**: For investigations, literature reviews, technical analysis - - Phases: question → methodology → gather → analyze → synthesize → publish - - Best for: feasibility studies, market research, technology evaluation - -### Mission Inference - -1. **Analyze the feature description** to identify the primary goal: - - Building, coding, implementing, creating software → **software-dev** - - Researching, investigating, analyzing, evaluating → **research** - -2. 
**Check for explicit mission requests** in the user's description: - - If user mentions "research project", "investigation", "analysis" → use research - - If user mentions "build", "implement", "create feature" → use software-dev - -3. **Confirm with user** (unless explicit): - > "Based on your description, this sounds like a **[software-dev/research]** project. - > I'll use the **[mission name]** mission. Does that work for you?" - -4. **Handle user response**: - - If confirmed: proceed with selected mission - - If user wants different mission: use their choice - -5. **Handle --mission flag**: If the user provides `--mission ` in their command, skip inference and use the specified mission directly. - -Store the final mission selection in your notes and include it in the spec output. Do not pass a `--mission` flag to feature creation. - -## Workflow (0.11.0+) - -**Planning happens in the planning repository - NO worktree created!** - -1. Creates `kitty-specs/###-feature/spec.md` directly in planning repo -2. Automatically commits to target branch -3. No worktree created during specify - -**Worktrees created later**: Use `spec-kitty implement WP##` to create a workspace for each work package. Worktrees are created later during implement (e.g., `.worktrees/###-feature-WP##`). - -## Location - -- Work in: **Planning repository** (not a worktree) -- Creates: `kitty-specs/###-feature/spec.md` -- Commits to: target branch (`meta.json` → `target_branch`) - -## Outline - -### 0. Generate a Friendly Feature Title - -- Summarize the agreed intent into a short, descriptive title (aim for ≤7 words; avoid filler like "feature" or "thing"). -- Read that title back during the Intent Summary and revise it if the user requests changes. -- Use the confirmed title to derive the kebab-case feature slug for the create-feature command. - -The text the user typed after `/spec-kitty.specify` in the triggering message **is** the initial feature description. 
Capture it verbatim, but treat it only as a starting point for discovery—not the final truth. Your job is to interrogate the request, surface gaps, and co-create a complete specification with the user. - -Given that feature description, do this: - -- **Generation Mode (arguments provided)**: Use the provided text as a starting point, validate it through discovery, and fill gaps with explicit questions or clearly documented assumptions (limit `[NEEDS CLARIFICATION: …]` to at most three critical decisions the user has postponed). -- **Interactive Interview Mode (no arguments)**: Use the discovery interview to elicit all necessary context, synthesize the working feature description, and confirm it with the user before you generate any specification artifacts. - -1. **Check discovery status**: - - If this is your first message or discovery questions remain unanswered, stay in the one-question loop, capture the user's response, update your internal table, and end with `WAITING_FOR_DISCOVERY_INPUT`. Do **not** surface the table; keep it internal. Do **not** call the creation command yet. - - Only proceed once every discovery question has an explicit answer and the user has acknowledged the Intent Summary. - - Empty invocation rule: stay in interview mode until you can restate the agreed-upon feature description. Do **not** call the creation command while the description is missing or provisional. - -2. When discovery is complete and the intent summary, **title**, and **mission** are confirmed, run the feature creation command from repo root: - - ```bash - spec-kitty agent feature create-feature "" --json - ``` - - Where `` is a kebab-case version of the friendly title (e.g., "Checkout Upsell Flow" → "checkout-upsell-flow"). 
- - The command returns JSON with: - - `result`: "success" or error message - - `feature`: Feature number and slug (e.g., "014-checkout-upsell-flow") - - `feature_dir`: Absolute path to the feature directory inside the main repo - - Parse these values for use in subsequent steps. All file paths are absolute. - - **IMPORTANT**: You must only ever run this command once. The JSON is provided in the terminal output - always refer to it to get the actual paths you're looking for. -3. **Stay in the main repository**: No worktree is created during specify. - -4. The spec template is bundled with spec-kitty at `src/specify_cli/missions/software-dev/.kittify/templates/spec-template.md`. The template defines required sections for software development features. - -5. Create meta.json in the feature directory with: - ```json - { - "feature_number": "", - "slug": "", - "friendly_name": "", - "mission": "", - "source_description": "$ARGUMENTS", - "created_at": "", - "target_branch": "main", - "vcs": "git" - } - ``` - - **CRITICAL**: Always set these fields explicitly: - - `target_branch`: Set to "main" by default (user can change to "2.x" for dual-branch features) - - `vcs`: Set to "git" by default (enables VCS locking and prevents jj fallback) - -6. 
Generate the specification content by following this flow: - - Use the discovery answers as your authoritative source of truth (do **not** rely on raw `$ARGUMENTS`) - - For empty invocations, treat the synthesized interview summary as the canonical feature description - - Identify: actors, actions, data, constraints, motivations, success metrics - - For any remaining ambiguity: - * Ask the user a focused follow-up question immediately and halt work until they answer - * Only use `[NEEDS CLARIFICATION: …]` when the user explicitly defers the decision - * Record any interim assumption in the Assumptions section - * Prioritize clarifications by impact: scope > outcomes > risks/security > user experience > technical details - - Fill User Scenarios & Testing section (ERROR if no clear user flow can be determined) - - Generate Functional Requirements (each requirement must be testable) - - Define Success Criteria (measurable, technology-agnostic outcomes) - - Identify Key Entities (if data involved) - -7. Write the specification to `/spec.md` using the template structure, replacing placeholders with concrete details derived from the feature description while preserving section order and headings. - -8. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria: - - a. 
**Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items: - - ```markdown - # Specification Quality Checklist: [FEATURE NAME] - - **Purpose**: Validate specification completeness and quality before proceeding to planning - **Created**: [DATE] - **Feature**: [Link to spec.md] - - ## Content Quality - - - [ ] No implementation details (languages, frameworks, APIs) - - [ ] Focused on user value and business needs - - [ ] Written for non-technical stakeholders - - [ ] All mandatory sections completed - - ## Requirement Completeness - - - [ ] No [NEEDS CLARIFICATION] markers remain - - [ ] Requirements are testable and unambiguous - - [ ] Success criteria are measurable - - [ ] Success criteria are technology-agnostic (no implementation details) - - [ ] All acceptance scenarios are defined - - [ ] Edge cases are identified - - [ ] Scope is clearly bounded - - [ ] Dependencies and assumptions identified - - ## Feature Readiness - - - [ ] All functional requirements have clear acceptance criteria - - [ ] User scenarios cover primary flows - - [ ] Feature meets measurable outcomes defined in Success Criteria - - [ ] No implementation details leak into specification - - ## Notes - - - Items marked incomplete require spec updates before `/spec-kitty.clarify` or `/spec-kitty.plan` - ``` - - b. **Run Validation Check**: Review the spec against each checklist item: - - For each item, determine if it passes or fails - - Document specific issues found (quote relevant spec sections) - - c. **Handle Validation Results**: - - - **If all items pass**: Mark checklist complete and proceed to step 6 - - - **If items fail (excluding [NEEDS CLARIFICATION])**: - 1. List the failing items and specific issues - 2. Update the spec to address each issue - 3. Re-run validation until all items pass (max 3 iterations) - 4. 
If still failing after 3 iterations, document remaining issues in checklist notes and warn user - - - **If [NEEDS CLARIFICATION] markers remain**: - 1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec - 2. Re-confirm with the user whether each outstanding decision truly needs to stay unresolved. Do not assume away critical gaps. - 3. For each clarification the user has explicitly deferred, present options using plain text—no tables: - - ``` - Question [N]: [Topic] - Context: [Quote relevant spec section] - Need: [Specific question from NEEDS CLARIFICATION marker] - Options: (A) [First answer — implications] · (B) [Second answer — implications] · (C) [Third answer — implications] · (D) Custom (describe your own answer) - Reply with a letter or a custom answer. - ``` - - 4. Number questions sequentially (Q1, Q2, Q3 - max 3 total) - 5. Present all questions together before waiting for responses - 6. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B") - 7. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer - 9. Re-run validation after all clarifications are resolved - - d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status - -9. Report completion with feature directory, spec file path, checklist results, and readiness for the next phase (`/spec-kitty.clarify` or `/spec-kitty.plan`). - -**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing. - -## General Guidelines - -## Quick Guidelines - -- Focus on **WHAT** users need and **WHY**. -- Avoid HOW to implement (no tech stack, APIs, code structure). -- Written for business stakeholders, not developers. -- DO NOT create any checklists that are embedded in the spec. That will be a separate command. 
- -### Section Requirements - -- **Mandatory sections**: Must be completed for every feature -- **Optional sections**: Include only when relevant to the feature -- When a section doesn't apply, remove it entirely (don't leave as "N/A") - -### For AI Generation - -When creating this spec from a user prompt: - -1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps -2. **Document assumptions**: Record reasonable defaults in the Assumptions section -3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that: - - Significantly impact feature scope or user experience - - Have multiple reasonable interpretations with different implications - - Lack any reasonable default -4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details -5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item -6. **Common areas needing clarification** (only if no reasonable default exists): - - Feature scope and boundaries (include/exclude specific use cases) - - User types and permissions (if multiple conflicting interpretations possible) - - Security/compliance requirements (when legally/financially significant) - -**Examples of reasonable defaults** (don't ask about these): - -- Data retention: Industry-standard practices for the domain -- Performance targets: Standard web/mobile app expectations unless specified -- Error handling: User-friendly messages with appropriate fallbacks -- Authentication method: Standard session-based or OAuth2 for web apps -- Integration patterns: RESTful APIs unless specified otherwise - -### Success Criteria Guidelines - -Success criteria must be: - -1. **Measurable**: Include specific metrics (time, percentage, count, rate) -2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools -3. 
**User-focused**: Describe outcomes from user/business perspective, not system internals -4. **Verifiable**: Can be tested/validated without knowing implementation details - -**Good examples**: - -- "Users can complete checkout in under 3 minutes" -- "System supports 10,000 concurrent users" -- "95% of searches return results in under 1 second" -- "Task completion rate improves by 40%" - -**Bad examples** (implementation-focused): - -- "API response time is under 200ms" (too technical, use "Users see results instantly") -- "Database can handle 1000 TPS" (implementation detail, use user-facing metric) -- "React components render efficiently" (framework-specific) -- "Redis cache hit rate above 80%" (technology-specific) diff --git a/.cursor/commands/spec-kitty.status.md b/.cursor/commands/spec-kitty.status.md deleted file mode 100644 index 8776b1ca64..0000000000 --- a/.cursor/commands/spec-kitty.status.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: Display kanban board status showing work package progress across lanes (planned/doing/for_review/done). ---- - - -## Status Board - -Show the current status of all work packages in the active feature. 
This displays: -- Kanban board with WPs organized by lane -- Progress bar showing completion percentage -- Parallelization opportunities (which WPs can run concurrently) -- Next steps recommendations - -## When to Use - -- Before starting work (see what's ready to implement) -- During implementation (track overall progress) -- After completing a WP (see what's next) -- When planning parallelization (identify independent WPs) - -## Implementation - -Run the CLI command to display the status board: - -```bash -spec-kitty agent tasks status -``` - -To specify a feature explicitly: - -```bash -spec-kitty agent tasks status --feature 012-documentation-mission -``` - -The command displays a rich kanban board with: -- Progress bar showing completion percentage -- Work packages organized by lane (planned/doing/for_review/done) -- Summary metrics - -## Alternative: Python API - -For programmatic access (e.g., in Jupyter notebooks or scripts), use the Python function: - -```python -from specify_cli.agent_utils.status import show_kanban_status - -# Auto-detect feature from current directory/branch -result = show_kanban_status() - -# Or specify feature explicitly: -# result = show_kanban_status("012-documentation-mission") -``` - -Returns structured data: - -```python -{ - 'feature_slug': '012-documentation-mission', - 'progress_percentage': 80.0, - 'done_count': 8, - 'total_wps': 10, - 'by_lane': { - 'planned': ['WP09'], - 'doing': ['WP10'], - 'for_review': [], - 'done': ['WP01', 'WP02', ...] - }, - 'parallelization': { - 'ready_wps': [...], - 'can_parallelize': True/False, - 'parallel_groups': [...] 
- } -} - -## Output Example - -``` -╭─────────────────────────────────────────────────────────────────────╮ -│ 012-documentation-mission │ -│ Progress: 80% [████████░░] │ -╰─────────────────────────────────────────────────────────────────────╯ - -┌─────────────┬─────────────┬─────────────┬─────────────┐ -│ PLANNED │ DOING │ FOR_REVIEW │ DONE │ -├─────────────┼─────────────┼─────────────┼─────────────┤ -│ WP09 │ WP10 │ │ WP01 │ -│ │ │ │ WP02 │ -│ │ │ │ WP03 │ -│ │ │ │ ... │ -└─────────────┴─────────────┴─────────────┴─────────────┘ - -🔀 Parallelization: WP09 can start (no dependencies) -``` diff --git a/.cursor/commands/spec-kitty.tasks.md b/.cursor/commands/spec-kitty.tasks.md deleted file mode 100644 index e170ee580e..0000000000 --- a/.cursor/commands/spec-kitty.tasks.md +++ /dev/null @@ -1,577 +0,0 @@ ---- -description: Generate grouped work packages with actionable subtasks and matching prompt files for the feature in one pass. ---- - - -# /spec-kitty.tasks - Generate Work Packages - -**Version**: 0.11.0+ - -## ⚠️ CRITICAL: THIS IS THE MOST IMPORTANT PLANNING WORK - -**You are creating the blueprint for implementation**. The quality of work packages determines: -- How easily agents can implement the feature -- How parallelizable the work is -- How reviewable the code will be -- Whether the feature succeeds or fails - -**QUALITY OVER SPEED**: This is NOT the time to save tokens or rush. Take your time to: -- Understand the full scope deeply -- Break work into clear, manageable pieces -- Write detailed, actionable guidance -- Think through risks and edge cases - -**Token usage is EXPECTED and GOOD here**. A thorough task breakdown saves 10x the effort during implementation. Do not cut corners. - ---- - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Tasks works in the planning repository. NO worktrees created. 
- -```bash -# Run from project root (same directory as /spec-kitty.plan): -# You should already be here if you just ran /spec-kitty.plan - -# Creates: -# - kitty-specs/###-feature/tasks/WP01-*.md → In planning repository -# - kitty-specs/###-feature/tasks/WP02-*.md → In planning repository -# - Commits ALL to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -**Worktrees created later**: After tasks are generated, use `spec-kitty implement WP##` to create workspace for each WP. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -Before proceeding, verify you are in the planning repository: - -**Check your current branch:** -```bash -git branch --show-current -``` - -**Expected output:** the target branch (meta.json → target_branch), typically `main` or `2.x` -**If you see a feature branch:** You're in the wrong place. Return to the target branch: -```bash -cd $(git rev-parse --show-toplevel) -git checkout -``` - -Work packages are generated directly in `kitty-specs/###-feature/` and committed to the target branch. Worktrees are created later when implementing each work package. - -## Outline - -1. **Setup**: Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` from the repository root and capture `FEATURE_DIR` plus `AVAILABLE_DOCS`. All paths must be absolute. - - **CRITICAL**: The command returns JSON with `FEATURE_DIR` as an ABSOLUTE path (e.g., `/Users/robert/Code/new_specify/kitty-specs/001-feature-name`). - - **YOU MUST USE THIS PATH** for ALL subsequent file operations. 
Example: - ``` - FEATURE_DIR = "/Users/robert/Code/new_specify/kitty-specs/001-a-simple-hello" - tasks.md location: FEATURE_DIR + "/tasks.md" - prompt location: FEATURE_DIR + "/tasks/WP01-slug.md" - ``` - - **DO NOT CREATE** paths like: - - ❌ `tasks/WP01-slug.md` (missing FEATURE_DIR prefix) - - ❌ `/tasks/WP01-slug.md` (wrong root) - - ❌ `FEATURE_DIR/tasks/planned/WP01-slug.md` (WRONG - no subdirectories!) - - ❌ `WP01-slug.md` (wrong directory) - -2. **Load design documents** from `FEATURE_DIR` (only those present): - - **Required**: plan.md (tech architecture, stack), spec.md (user stories & priorities) - - **Optional**: data-model.md (entities), contracts/ (API schemas), research.md (decisions), quickstart.md (validation scenarios) - - Scale your effort to the feature: simple UI tweaks deserve lighter coverage, multi-system releases require deeper decomposition. - -3. **Derive fine-grained subtasks** (IDs `T001`, `T002`, ...): - - Parse plan/spec to enumerate concrete implementation steps, tests (only if explicitly requested), migrations, and operational work. - - Capture prerequisites, dependencies, and parallelizability markers (`[P]` means safe to parallelize per file/concern). - - Maintain the subtask list internally; it feeds the work-package roll-up and the prompts. - -4. 
**Roll subtasks into work packages** (IDs `WP01`, `WP02`, ...): - - **IDEAL WORK PACKAGE SIZE** (most important guideline): - - **Target: 3-7 subtasks per WP** (results in 200-500 line prompts) - - **Maximum: 10 subtasks per WP** (results in ~700 line prompts) - - **If more than 10 subtasks needed**: Create additional WPs, don't pack them in - - **WHY SIZE MATTERS**: - - **Too large** (>10 subtasks, >700 lines): Agents get overwhelmed, skip details, make mistakes - - **Too small** (<3 subtasks, <150 lines): Overhead of worktree creation not worth it - - **Just right** (3-7 subtasks, 200-500 lines): Agent can hold entire context, implements thoroughly - - **NUMBER OF WPs**: Let the work dictate the count - - Simple feature (5-10 subtasks total): 2-3 WPs - - Medium feature (20-40 subtasks): 5-8 WPs - - Complex feature (50+ subtasks): 10-20 WPs ← **This is OK!** - - **Better to have 20 focused WPs than 5 overwhelming WPs** - - **GROUPING PRINCIPLES**: - - Each WP should be independently implementable - - Root in a single user story or cohesive subsystem - - Ensure every subtask appears in exactly one work package - - Name with succinct goal (e.g., "User Story 1 – Real-time chat happy path") - - Record metadata: priority, success criteria, risks, dependencies, included subtasks - -5. **Write `tasks.md`** using the bundled tasks template (`src/specify_cli/missions/software-dev/.kittify/templates/tasks-template.md`): - - **Location**: Write to `FEATURE_DIR/tasks.md` (use the absolute FEATURE_DIR path from step 1) - - Populate the Work Package sections (setup, foundational, per-story, polish) with the `WPxx` entries - - Under each work package include: - - Summary (goal, priority, independent test) - - Included subtasks (checkbox list referencing `Txxx`) - - Implementation sketch (high-level sequence) - - Parallel opportunities, dependencies, and risks - - Preserve the checklist style so implementers can mark progress - -6. 
**Generate prompt files (one per work package)**: - - **CRITICAL PATH RULE**: All work package files MUST be created in a FLAT `FEATURE_DIR/tasks/` directory, NOT in subdirectories! - - Correct structure: `FEATURE_DIR/tasks/WPxx-slug.md` (flat, no subdirectories) - - WRONG (do not create): `FEATURE_DIR/tasks/planned/`, `FEATURE_DIR/tasks/doing/`, or ANY lane subdirectories - - WRONG (do not create): `/tasks/`, `tasks/`, or any path not under FEATURE_DIR - - Ensure `FEATURE_DIR/tasks/` exists (create as flat directory, NO subdirectories) - - For each work package: - - Derive a kebab-case slug from the title; filename: `WPxx-slug.md` - - Full path example: `FEATURE_DIR/tasks/WP01-create-html-page.md` (use ABSOLUTE path from FEATURE_DIR variable) - - Use the bundled task prompt template (`src/specify_cli/missions/software-dev/.kittify/templates/task-prompt-template.md`) to capture: - - Frontmatter with `work_package_id`, `subtasks` array, `lane: "planned"`, `dependencies`, history entry - - Objective, context, detailed guidance per subtask - - Test strategy (only if requested) - - Definition of Done, risks, reviewer guidance - - Update `tasks.md` to reference the prompt filename - - **TARGET PROMPT SIZE**: 200-500 lines per WP (results from 3-7 subtasks) - - **MAXIMUM PROMPT SIZE**: 700 lines per WP (10 subtasks max) - - **If prompts are >700 lines**: Split the WP - it's too large - - **IMPORTANT**: All WP files live in flat `tasks/` directory. Lane status is tracked ONLY in the `lane:` frontmatter field, NOT by directory location. Agents can change lanes by editing the `lane:` field directly or using `spec-kitty agent tasks move-task`. - -7. 
**Finalize tasks with dependency parsing and commit**: - After generating all WP prompt files, run the finalization command to: - - Parse dependencies from tasks.md - - Update WP frontmatter with dependencies field - - Validate dependencies (check for cycles, invalid references) - - Commit all tasks to target branch - - **CRITICAL**: Run this command from repo root: - ```bash - spec-kitty agent feature finalize-tasks --json - ``` - - This step is MANDATORY for workspace-per-WP features. Without it: - - Dependencies won't be in frontmatter - - Agents won't know which --base flag to use - - Tasks won't be committed to target branch - - **IMPORTANT - DO NOT COMMIT AGAIN AFTER THIS COMMAND**: - - finalize-tasks COMMITS the files automatically - - JSON output includes "commit_created": true/false and "commit_hash" - - If commit_created=true, files are ALREADY committed - do not run git commit again - - Other dirty files shown by 'git status' (templates, config) are UNRELATED - - Verify using the commit_hash from JSON output, not by running git add/commit again - -8. **Report**: Provide a concise outcome summary: - - Path to `tasks.md` - - Work package count and per-package subtask tallies - - **Average prompt size** (estimate lines per WP) - - **Validation**: Flag if any WP has >10 subtasks or >700 estimated lines - - Parallelization highlights - - MVP scope recommendation (usually Work Package 1) - - Prompt generation stats (files written, directory structure, any skipped items with rationale) - - Finalization status (dependencies parsed, X WP files updated, committed to target branch) - - Next suggested command (e.g., `/spec-kitty.analyze` or `/spec-kitty.implement`) - -Context for work-package planning: $ARGUMENTS - -The combination of `tasks.md` and the bundled prompt files must enable a new engineer to pick up any work package and deliver it end-to-end without further specification spelunking. 
- -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. - -## Work Package Sizing Guidelines (CRITICAL) - -### Ideal WP Size - -**Target: 3-7 subtasks per WP** -- Results in 200-500 line prompt files -- Agent can hold entire context in working memory -- Clear scope - easy to review -- Parallelizable - multiple agents can work simultaneously - -**Examples of well-sized WPs**: -- WP01: Foundation Setup (5 subtasks, ~300 lines) - - T001: Create database schema - - T002: Set up migration system - - T003: Create base models - - T004: Add validation layer - - T005: Write foundation tests - -- WP02: User Authentication (6 subtasks, ~400 lines) - - T006: Implement login endpoint - - T007: Implement logout endpoint - - T008: Add session management - - T009: Add password reset flow - - T010: Write auth tests - - T011: Add rate limiting - -### Maximum WP Size - -**Hard limit: 10 subtasks, ~700 lines** -- Beyond this, agents start making mistakes -- Prompts become overwhelming -- Reviews take too long -- Integration risk increases - -**If you need more than 10 subtasks**: SPLIT into multiple WPs. - -### Number of WPs: No Arbitrary Limit - -**DO NOT limit based on WP count. 
Limit based on SIZE.** - -- ✅ **20 WPs of 5 subtasks each** = 100 subtasks, manageable prompts -- ❌ **5 WPs of 20 subtasks each** = 100 subtasks, overwhelming 1400-line prompts - -**Feature complexity scales with subtask count, not WP count**: -- Simple feature: 10-15 subtasks → 2-4 WPs -- Medium feature: 30-50 subtasks → 6-10 WPs -- Complex feature: 80-120 subtasks → 15-20 WPs ← **Totally fine!** -- Very complex: 150+ subtasks → 25-30 WPs ← **Also fine!** - -**The goal is manageable WP size, not minimizing WP count.** - -### When to Split a WP - -**Split if ANY of these are true**: -- More than 10 subtasks -- Prompt would exceed 700 lines -- Multiple independent concerns mixed together -- Different phases or priorities mixed -- Agent would need to switch contexts multiple times - -**How to split**: -- By phase: Foundation WP01, Implementation WP02, Testing WP03 -- By component: Database WP01, API WP02, UI WP03 -- By user story: Story 1 WP01, Story 2 WP02, Story 3 WP03 -- By type of work: Code WP01, Tests WP02, Migration WP03, Docs WP04 - -### When to Merge WPs - -**Merge if ALL of these are true**: -- Each WP has <3 subtasks -- Combined would be <7 subtasks -- Both address the same concern/component -- No natural parallelization opportunity -- Implementation is highly coupled - -**Don't merge just to hit a WP count target!** - -## Task Generation Rules - -**Tests remain optional**. Only include testing tasks/steps if the feature spec or user explicitly demands them. - -1. **Subtask derivation**: - - Assign IDs `Txxx` sequentially in execution order. - - Use `[P]` for parallel-safe items (different files/components). - - Include migrations, data seeding, observability, and operational chores. - - **Ideal subtask granularity**: One clear action (e.g., "Create user model", "Add login endpoint") - - **Too granular**: "Add import statement", "Fix typo" (bundle these) - - **Too coarse**: "Build entire API" (split into endpoints) - -2. 
**Work package grouping**: - - **Focus on SIZE first, count second** - - Target 3-7 subtasks per WP (200-500 line prompts) - - Maximum 10 subtasks per WP (700 line prompts) - - Keep each work package laser-focused on a single goal - - Avoid mixing unrelated concerns - - **Let complexity dictate WP count**: 20+ WPs is fine for complex features - -3. **Prioritisation & dependencies**: - - Sequence work packages: setup → foundational → story phases (priority order) → polish. - - Call out inter-package dependencies explicitly in both `tasks.md` and the prompts. - - Front-load infrastructure/foundation WPs (enable parallelization) - -4. **Prompt composition**: - - Mirror subtask order inside the prompt. - - Provide actionable implementation and test guidance per subtask—short for trivial work, exhaustive for complex flows. - - **Aim for 30-70 lines per subtask** in the prompt (includes purpose, steps, files, validation) - - Surface risks, integration points, and acceptance gates clearly so reviewers know what to verify. - - Include examples where helpful (API request/response shapes, config file structures, test cases) - -5. **Quality checkpoints**: - - After drafting WPs, review each prompt size estimate - - If any WP >700 lines: **STOP and split it** - - If most WPs <200 lines: Consider merging related ones - - Aim for consistency: Most WPs should be similar size (within 200-line range) - - **Think like an implementer**: Can I complete this WP in one focused session? If not, it's too big. - -6. **Think like a reviewer**: Any vague requirement should be tightened until a reviewer can objectively mark it done or not done. - -## Step-by-Step Process - -### Step 1: Setup - -Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` and capture `FEATURE_DIR`. 
- -### Step 2: Load Design Documents - -Read from `FEATURE_DIR`: -- spec.md (required) -- plan.md (required) -- data-model.md (optional) -- research.md (optional) -- contracts/ (optional) - -### Step 3: Derive ALL Subtasks - -Create complete list of subtasks with IDs T001, T002, etc. - -**Don't worry about count yet - capture EVERYTHING needed.** - -### Step 4: Group into Work Packages - -**SIZING ALGORITHM**: - -``` -For each cohesive unit of work: - 1. List related subtasks - 2. Count subtasks - 3. Estimate prompt lines (subtasks × 50 lines avg) - - If subtasks <= 7 AND estimated lines <= 500: - ✓ Good WP size - create it - - Else if subtasks > 10 OR estimated lines > 700: - ✗ Too large - split into 2+ WPs - - Else if subtasks < 3 AND can merge with related WP: - → Consider merging (but don't force it) -``` - -**Examples**: - -**Good sizing**: -- WP01: Database Foundation (5 subtasks, ~300 lines) ✓ -- WP02: User Authentication (7 subtasks, ~450 lines) ✓ -- WP03: Admin Dashboard (6 subtasks, ~400 lines) ✓ - -**Too large - MUST SPLIT**: -- ❌ WP01: Entire Backend (25 subtasks, ~1500 lines) - - ✓ Split into: DB Layer (5), Business Logic (6), API Layer (7), Auth (7) - -**Too small - CONSIDER MERGING**: -- WP01: Add config file (2 subtasks, ~100 lines) -- WP02: Add logging (2 subtasks, ~120 lines) - - ✓ Merge into: WP01: Infrastructure Setup (4 subtasks, ~220 lines) - -### Step 5: Write tasks.md - -Create work package sections with: -- Summary (goal, priority, test criteria) -- Included subtasks (checkbox list) -- Implementation notes -- Parallel opportunities -- Dependencies -- **Estimated prompt size** (e.g., "~400 lines") - -### Step 6: Generate WP Prompt Files - -For each WP, generate `FEATURE_DIR/tasks/WPxx-slug.md` using the template. - -**CRITICAL VALIDATION**: After generating each prompt: -1. Count lines in the prompt -2. If >700 lines: GO BACK and split the WP -3. 
If >1000 lines: **STOP - this will fail** - you MUST split it - -**Self-check**: -- Subtask count: 3-7? ✓ | 8-10? ⚠️ | 11+? ❌ SPLIT -- Estimated lines: 200-500? ✓ | 500-700? ⚠️ | 700+? ❌ SPLIT -- Can implement in one session? ✓ | Multiple sessions needed? ❌ SPLIT - -### Step 7: Finalize Tasks - -Run `spec-kitty agent feature finalize-tasks --json` to: -- Parse dependencies -- Update frontmatter -- Validate (cycles, invalid refs) -- Commit to target branch - -**DO NOT run git commit after this** - finalize-tasks commits automatically. -Check JSON output for "commit_created": true and "commit_hash" to verify. - -### Step 8: Report - -Provide summary with: -- WP count and subtask tallies -- **Size distribution** (e.g., "6 WPs ranging from 250-480 lines") -- **Size validation** (e.g., "✓ All WPs within ideal range" OR "⚠️ WP05 is 820 lines - consider splitting") -- Parallelization opportunities -- MVP scope -- Next command - -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. 
- -## ⚠️ Common Mistakes to Avoid - -### ❌ MISTAKE 1: Optimizing for WP Count - -**Bad thinking**: "I'll create exactly 5-7 WPs to keep it manageable" -→ Results in: 20 subtasks per WP, 1200-line prompts, overwhelmed agents - -**Good thinking**: "Each WP should be 3-7 subtasks (200-500 lines). If that means 15 WPs, that's fine." -→ Results in: Focused WPs, successful implementation, happy agents - -### ❌ MISTAKE 2: Token Conservation During Planning - -**Bad thinking**: "I'll save tokens by writing brief prompts with minimal guidance" -→ Results in: Agents confused during implementation, asking clarifying questions, doing work wrong, requiring rework - -**Good thinking**: "I'll invest tokens now to write thorough prompts with examples and edge cases" -→ Results in: Agents implement correctly the first time, no rework needed, net token savings - -### ❌ MISTAKE 3: Mixing Unrelated Concerns - -**Bad example**: WP03: Misc Backend Work (12 subtasks) -- T010: Add user model -- T011: Configure logging -- T012: Set up email service -- T013: Add admin dashboard -- ... (8 more unrelated tasks) - -**Good approach**: Split by concern -- WP03: User Management (T010-T013, 4 subtasks) -- WP04: Infrastructure Services (T014-T017, 4 subtasks) -- WP05: Admin Dashboard (T018-T021, 4 subtasks) - -### ❌ MISTAKE 4: Insufficient Prompt Detail - -**Bad prompt** (~20 lines per subtask): -```markdown -### Subtask T001: Add user authentication - -**Purpose**: Implement login - -**Steps**: -1. Create endpoint -2. Add validation -3. Test it -``` - -**Good prompt** (~60 lines per subtask): -```markdown -### Subtask T001: Implement User Login Endpoint - -**Purpose**: Create POST /api/auth/login endpoint that validates credentials and returns JWT token. - -**Steps**: -1. 
Create endpoint handler in `src/api/auth.py`: - - Route: POST /api/auth/login - - Request body: `{email: string, password: string}` - - Response: `{token: string, user: UserProfile}` on success - - Error codes: 400 (invalid input), 401 (bad credentials), 429 (rate limited) - -2. Implement credential validation: - - Hash password with bcrypt (matches registration hash) - - Compare against stored hash from database - - Use constant-time comparison to prevent timing attacks - -3. Generate JWT token on success: - - Include: user_id, email, issued_at, expires_at (24 hours) - - Sign with SECRET_KEY from environment - - Algorithm: HS256 - -4. Add rate limiting: - - Max 5 attempts per IP per 15 minutes - - Return 429 with Retry-After header - -**Files**: -- `src/api/auth.py` (new file, ~80 lines) -- `tests/api/test_auth.py` (new file, ~120 lines) - -**Validation**: -- [ ] Valid credentials return 200 with token -- [ ] Invalid credentials return 401 -- [ ] Missing fields return 400 -- [ ] Rate limit enforced (test with 6 requests) -- [ ] JWT token is valid and contains correct claims -- [ ] Token expires after 24 hours - -**Edge Cases**: -- Account doesn't exist: Return 401 (same as wrong password - don't leak info) -- Empty password: Return 400 -- SQL injection in email field: Prevented by parameterized queries -- Concurrent login attempts: Handle with database locking -``` - -## Remember - -**This is the most important planning work you'll do.** - -A well-crafted set of work packages with detailed prompts makes implementation smooth and parallelizable. - -A rushed job with vague, oversized WPs causes: -- Agents getting stuck -- Implementation taking 2-3x longer -- Rework and review cycles -- Feature failure - -**Invest the tokens now. Be thorough. 
Future agents will thank you.** diff --git a/.cursorignore b/.cursorignore deleted file mode 100644 index 85e9fdded2..0000000000 --- a/.cursorignore +++ /dev/null @@ -1,55 +0,0 @@ -# Spec Kitty Configuration and Templates -.kittify/templates/ -.kittify/missions/ -.kittify/scripts/ - -# Agent command directories (generated from templates, not source) -.claude/ -.codex/ -.gemini/ -.cursor/ -.qwen/ -.opencode/ -.windsurf/ -.kilocode/ -.augment/ -.roo/ -.amazonq/ -.github/copilot/ - -# Git metadata -.git/ - -# Build artifacts and caches -__pycache__/ -*.pyc -*.pyo -.pytest_cache/ -.coverage -htmlcov/ -node_modules/ -dist/ -build/ -*.egg-info/ - -# Virtual environments -.venv/ -venv/ -env/ - -# OS-specific files -.DS_Store -Thumbs.db -desktop.ini - -# IDE directories -.vscode/ -.idea/ -*.swp -*.swo -*~ - -# Logs and databases -*.log -*.db -*.sqlite diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md deleted file mode 100644 index 55afde2030..0000000000 --- a/.github/copilot-instructions.md +++ /dev/null @@ -1,12 +0,0 @@ -# Copilot Instructions - -## Project Context -This is a Phenotype organization project. Follow existing code patterns and conventions. - -## Guidelines -- Follow the project's existing code style and patterns -- Prefer editing existing files over creating new ones -- Do not introduce security vulnerabilities (injection, XSS, etc.) -- Keep solutions simple and focused — avoid over-engineering -- Do not add unnecessary comments, docstrings, or type annotations to unchanged code -- Respect .gitignore and .claudeignore exclusions diff --git a/.github/prompts/spec-kitty.accept.prompt.md b/.github/prompts/spec-kitty.accept.prompt.md deleted file mode 100644 index 1408176581..0000000000 --- a/.github/prompts/spec-kitty.accept.prompt.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Validate feature readiness and guide final acceptance steps. 
---- - - -# /spec-kitty.accept - Validate Feature Readiness - -**Version**: 0.11.0+ -**Purpose**: Validate all work packages are complete and feature is ready to merge. - -## 📍 WORKING DIRECTORY: Run from MAIN repository - -**IMPORTANT**: Accept runs from the main repository root, NOT from a WP worktree. - -```bash -# If you're in a worktree, return to main first: -cd $(git rev-parse --show-toplevel) - -# Then run accept: -spec-kitty accept -``` - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery (mandatory) - -Before running the acceptance workflow, gather the following: - -1. **Feature slug** (e.g., `005-awesome-thing`). If omitted, detect automatically. -2. **Acceptance mode**: - - `pr` when the feature will merge via hosted pull request. - - `local` when the feature will merge locally without a PR. - - `checklist` to run the readiness checklist without committing or producing merge instructions. -3. **Validation commands executed** (tests/builds). Collect each command verbatim; omit if none. -4. **Acceptance actor** (optional, defaults to the current agent name). - -Ask one focused question per item and confirm the summary before continuing. End the discovery turn with `WAITING_FOR_ACCEPTANCE_INPUT` until all answers are provided. - -## Execution Plan - -1. Compile the acceptance options into an argument list: - - Always include `--actor "copilot"`. - - Append `--feature ""` when the user supplied a slug. - - Append `--mode ` (`pr`, `local`, or `checklist`). - - Append `--test ""` for each validation command provided. -2. Run `(Missing script command for sh)` (the CLI wrapper) with the assembled arguments **and** `--json`. -3. Parse the JSON response. It contains: - - `summary.ok` (boolean) and other readiness details. - - `summary.outstanding` categories when issues remain. - - `instructions` (merge steps) and `cleanup_instructions`. - - `notes` (e.g., acceptance commit hash). 
-4. Present the outcome: - - If `summary.ok` is `false`, list each outstanding category with bullet points and advise the user to resolve them before retrying acceptance. - - If `summary.ok` is `true`, display: - - Acceptance timestamp, actor, and (if present) acceptance commit hash. - - Merge instructions and cleanup instructions as ordered steps. - - Validation commands executed (if any). -5. When the mode is `checklist`, make it clear no commits or merge instructions were produced. - -## Output Requirements - -- Summaries must be in plain text (no tables). Use short bullet lists for instructions. -- Surface outstanding issues before any congratulations or success messages. -- If the JSON payload includes warnings, surface them under an explicit **Warnings** section. -- Never fabricate results; only report what the JSON contains. - -## Error Handling - -- If the command fails or returns invalid JSON, report the failure and request user guidance (do not retry automatically). -- When outstanding issues exist, do **not** attempt to force acceptance—return the checklist and prompt the user to fix the blockers. diff --git a/.github/prompts/spec-kitty.analyze.prompt.md b/.github/prompts/spec-kitty.analyze.prompt.md deleted file mode 100644 index e2cd797d48..0000000000 --- a/.github/prompts/spec-kitty.analyze.prompt.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation. ---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Goal - -Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`. 
- -## Operating Constraints - -**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually). - -**Constitution Authority**: The project constitution (`/.kittify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`. - -## Execution Steps - -### 1. Initialize Analysis Context - -Run `(Missing script command for sh)` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths: - -- SPEC = FEATURE_DIR/spec.md -- PLAN = FEATURE_DIR/plan.md -- TASKS = FEATURE_DIR/tasks.md - -Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command). - -### 2. Load Artifacts (Progressive Disclosure) - -Load only the minimal necessary context from each artifact: - -**From spec.md:** - -- Overview/Context -- Functional Requirements -- Non-Functional Requirements -- User Stories -- Edge Cases (if present) - -**From plan.md:** - -- Architecture/stack choices -- Data Model references -- Phases -- Technical constraints - -**From tasks.md:** - -- Task IDs -- Descriptions -- Phase grouping -- Parallel markers [P] -- Referenced file paths - -**From constitution:** - -- Load `/.kittify/memory/constitution.md` for principle validation - -### 3. 
Build Semantic Models - -Create internal representations (do not include raw artifacts in output): - -- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`) -- **User story/action inventory**: Discrete user actions with acceptance criteria -- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases) -- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements - -### 4. Detection Passes (Token-Efficient Analysis) - -Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary. - -#### A. Duplication Detection - -- Identify near-duplicate requirements -- Mark lower-quality phrasing for consolidation - -#### B. Ambiguity Detection - -- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria -- Flag unresolved placeholders (TODO, TKTK, ???, ``, etc.) - -#### C. Underspecification - -- Requirements with verbs but missing object or measurable outcome -- User stories missing acceptance criteria alignment -- Tasks referencing files or components not defined in spec/plan - -#### D. Constitution Alignment - -- Any requirement or plan element conflicting with a MUST principle -- Missing mandated sections or quality gates from constitution - -#### E. Coverage Gaps - -- Requirements with zero associated tasks -- Tasks with no mapped requirement/story -- Non-functional requirements not reflected in tasks (e.g., performance, security) - -#### F. 
Inconsistency - -- Terminology drift (same concept named differently across files) -- Data entities referenced in plan but absent in spec (or vice versa) -- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note) -- Conflicting requirements (e.g., one requires Next.js while other specifies Vue) - -### 5. Severity Assignment - -Use this heuristic to prioritize findings: - -- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality -- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion -- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case -- **LOW**: Style/wording improvements, minor redundancy not affecting execution order - -### 6. Produce Compact Analysis Report - -Output a Markdown report (no file writes) with the following structure: - -## Specification Analysis Report - -| ID | Category | Severity | Location(s) | Summary | Recommendation | -|----|----------|----------|-------------|---------|----------------| -| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version | - -(Add one row per finding; generate stable IDs prefixed by category initial.) - -**Coverage Summary Table:** - -| Requirement Key | Has Task? | Task IDs | Notes | -|-----------------|-----------|----------|-------| - -**Constitution Alignment Issues:** (if any) - -**Unmapped Tasks:** (if any) - -**Metrics:** - -- Total Requirements -- Total Tasks -- Coverage % (requirements with >=1 task) -- Ambiguity Count -- Duplication Count -- Critical Issues Count - -### 7. 
Provide Next Actions - -At end of report, output a concise Next Actions block: - -- If CRITICAL issues exist: Recommend resolving before `/implement` -- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions -- Provide explicit command suggestions: e.g., "Run /spec-kitty.specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'" - -### 8. Offer Remediation - -Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.) - -## Operating Principles - -### Context Efficiency - -- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation -- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis -- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow -- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts - -### Analysis Guidelines - -- **NEVER modify files** (this is read-only analysis) -- **NEVER hallucinate missing sections** (if absent, report them accurately) -- **Prioritize constitution violations** (these are always CRITICAL) -- **Use examples over exhaustive rules** (cite specific instances, not generic patterns) -- **Report zero issues gracefully** (emit success report with coverage statistics) - -## Context - -$ARGUMENTS diff --git a/.github/prompts/spec-kitty.checklist.prompt.md b/.github/prompts/spec-kitty.checklist.prompt.md deleted file mode 100644 index 97228e12f3..0000000000 --- a/.github/prompts/spec-kitty.checklist.prompt.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -description: Generate a custom checklist for the current feature based on user requirements. 
---- - - -## Checklist Purpose: "Unit Tests for English" - -**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain. - -**NOT for verification/testing**: -- ❌ NOT "Verify the button clicks correctly" -- ❌ NOT "Test error handling works" -- ❌ NOT "Confirm the API returns 200" -- ❌ NOT checking if code/implementation matches the spec - -**FOR requirements quality validation**: -- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness) -- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity) -- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency) -- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage) -- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases) - -**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Execution Steps - -1. **Setup**: Run `(Missing script command for sh)` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list. - - All file paths must be absolute. - -2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST: - - Be generated from the user's phrasing + extracted signals from spec/plan/tasks - - Only ask about information that materially changes checklist content - - Be skipped individually if already unambiguous in `$ARGUMENTS` - - Prefer precision over breadth - - Generation algorithm: - 1. 
Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts"). - 2. Cluster signals into candidate focus areas (max 4) ranked by relevance. - 3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit. - 4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria. - 5. Formulate questions chosen from these archetypes: - - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?") - - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?") - - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?") - - Audience framing (e.g., "Will this be used by the author only or peers during PR review?") - - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?") - - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?") - - Question formatting rules: - - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters - - Limit to A–E options maximum; omit table if a free-form answer is clearer - - Never ask the user to restate what they already said - - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope." - - Defaults when interaction impossible: - - Depth: Standard - - Audience: Reviewer (PR) if code-related; Author otherwise - - Focus: Top 2 relevance clusters - - Output the questions (label Q1/Q2/Q3). 
After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more. - -3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers: - - Derive checklist theme (e.g., security, review, deploy, ux) - - Consolidate explicit must-have items mentioned by user - - Map focus selections to category scaffolding - - Infer any missing context from spec/plan/tasks (do NOT hallucinate) - -4. **Load feature context**: Read from FEATURE_DIR: - - spec.md: Feature requirements and scope - - plan.md (if exists): Technical details, dependencies - - tasks.md (if exists): Implementation tasks - - **Context Loading Strategy**: - - Load only necessary portions relevant to active focus areas (avoid full-file dumping) - - Prefer summarizing long sections into concise scenario/requirement bullets - - Use progressive disclosure: add follow-on retrieval only if gaps detected - - If source docs are large, generate interim summary items instead of embedding raw text - -5. **Generate checklist** - Create "Unit Tests for Requirements": - - Create `FEATURE_DIR/checklists/` directory if it doesn't exist - - Generate unique checklist filename: - - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`) - - Format: `[domain].md` - - If file exists, append to existing file - - Number items sequentially starting from CHK001 - - Each `/spec-kitty.checklist` run creates a NEW file (never overwrites existing checklists) - - **CORE PRINCIPLE - Test the Requirements, Not the Implementation**: - Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for: - - **Completeness**: Are all necessary requirements present? - - **Clarity**: Are requirements unambiguous and specific? 
- - **Consistency**: Do requirements align with each other? - - **Measurability**: Can requirements be objectively verified? - - **Coverage**: Are all scenarios/edge cases addressed? - - **Category Structure** - Group items by requirement quality dimensions: - - **Requirement Completeness** (Are all necessary requirements documented?) - - **Requirement Clarity** (Are requirements specific and unambiguous?) - - **Requirement Consistency** (Do requirements align without conflicts?) - - **Acceptance Criteria Quality** (Are success criteria measurable?) - - **Scenario Coverage** (Are all flows/cases addressed?) - - **Edge Case Coverage** (Are boundary conditions defined?) - - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?) - - **Dependencies & Assumptions** (Are they documented and validated?) - - **Ambiguities & Conflicts** (What needs clarification?) - - **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**: - - ❌ **WRONG** (Testing implementation): - - "Verify landing page displays 3 episode cards" - - "Test hover states work on desktop" - - "Confirm logo click navigates home" - - ✅ **CORRECT** (Testing requirements quality): - - "Are the exact number and layout of featured episodes specified?" [Completeness] - - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity] - - "Are hover state requirements consistent across all interactive elements?" [Consistency] - - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage] - - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases] - - "Are loading states defined for asynchronous episode data?" [Completeness] - - "Does the spec define visual hierarchy for competing UI elements?" 
[Clarity] - - **ITEM STRUCTURE**: - Each item should follow this pattern: - - Question format asking about requirement quality - - Focus on what's WRITTEN (or not written) in the spec/plan - - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.] - - Reference spec section `[Spec §X.Y]` when checking existing requirements - - Use `[Gap]` marker when checking for missing requirements - - **EXAMPLES BY QUALITY DIMENSION**: - - Completeness: - - "Are error handling requirements defined for all API failure modes? [Gap]" - - "Are accessibility requirements specified for all interactive elements? [Completeness]" - - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]" - - Clarity: - - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]" - - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]" - - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]" - - Consistency: - - "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]" - - "Are card component requirements consistent between landing and detail pages? [Consistency]" - - Coverage: - - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]" - - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]" - - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]" - - Measurability: - - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]" - - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]" - - **Scenario Classification & Coverage** (Requirements Quality Focus): - - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios - - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?" 
- - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]" - - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]" - - **Traceability Requirements**: - - MINIMUM: ≥80% of items MUST include at least one traceability reference - - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]` - - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]" - - **Surface & Resolve Issues** (Requirements Quality Problems): - Ask questions about the requirements themselves: - - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]" - - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]" - - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]" - - Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]" - - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]" - - **Content Consolidation**: - - Soft cap: If raw candidate items > 40, prioritize by risk/impact - - Merge near-duplicates checking the same requirement aspect - - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? 
[Coverage]" - - **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test: - - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior - - ❌ References to code execution, user actions, system behavior - - ❌ "Displays correctly", "works properly", "functions as expected" - - ❌ "Click", "navigate", "render", "load", "execute" - - ❌ Test cases, test plans, QA procedures - - ❌ Implementation details (frameworks, APIs, algorithms) - - **✅ REQUIRED PATTERNS** - These test requirements quality: - - ✅ "Are [requirement type] defined/specified/documented for [scenario]?" - - ✅ "Is [vague term] quantified/clarified with specific criteria?" - - ✅ "Are requirements consistent between [section A] and [section B]?" - - ✅ "Can [requirement] be objectively measured/verified?" - - ✅ "Are [edge cases/scenarios] addressed in requirements?" - - ✅ "Does the spec define [missing aspect]?" - -6. **Structure Reference**: Generate the checklist following the canonical template in `.kittify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### ` lines with globally incrementing IDs starting at CHK001. - -7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize: - - Focus areas selected - - Depth level - - Actor/timing - - Any explicit user-specified must-have items incorporated - -**Important**: Each `/spec-kitty.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. 
This allows: - -- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`) -- Simple, memorable filenames that indicate checklist purpose -- Easy identification and navigation in the `checklists/` folder - -To avoid clutter, use descriptive types and clean up obsolete checklists when done. - -## Example Checklist Types & Sample Items - -**UX Requirements Quality:** `ux.md` - -Sample items (testing the requirements, NOT the implementation): -- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]" -- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]" -- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]" -- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]" -- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]" -- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]" - -**API Requirements Quality:** `api.md` - -Sample items: -- "Are error response formats specified for all failure scenarios? [Completeness]" -- "Are rate limiting requirements quantified with specific thresholds? [Clarity]" -- "Are authentication requirements consistent across all endpoints? [Consistency]" -- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]" -- "Is versioning strategy documented in requirements? [Gap]" - -**Performance Requirements Quality:** `performance.md` - -Sample items: -- "Are performance requirements quantified with specific metrics? [Clarity]" -- "Are performance targets defined for all critical user journeys? [Coverage]" -- "Are performance requirements under different load conditions specified? [Completeness]" -- "Can performance requirements be objectively measured? [Measurability]" -- "Are degradation requirements defined for high-load scenarios? 
[Edge Case, Gap]" - -**Security Requirements Quality:** `security.md` - -Sample items: -- "Are authentication requirements specified for all protected resources? [Coverage]" -- "Are data protection requirements defined for sensitive information? [Completeness]" -- "Is the threat model documented and requirements aligned to it? [Traceability]" -- "Are security requirements consistent with compliance obligations? [Consistency]" -- "Are security failure/breach response requirements defined? [Gap, Exception Flow]" - -## Anti-Examples: What NOT To Do - -**❌ WRONG - These test implementation, not requirements:** - -```markdown -- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001] -- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003] -- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010] -- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005] -``` - -**✅ CORRECT - These test requirements quality:** - -```markdown -- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001] -- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003] -- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010] -- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005] -- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap] -- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001] -``` - -**Key Differences:** -- Wrong: Tests if the system works correctly -- Correct: Tests if the requirements are written correctly -- Wrong: Verification of behavior -- Correct: Validation of requirement quality -- Wrong: "Does it do X?" -- Correct: "Is X clearly specified?" 
diff --git a/.github/prompts/spec-kitty.clarify.prompt.md b/.github/prompts/spec-kitty.clarify.prompt.md deleted file mode 100644 index 6cc7b09ae5..0000000000 --- a/.github/prompts/spec-kitty.clarify.prompt.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec. ---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Outline - -Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file. - -Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/spec-kitty.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases. - -Execution steps: - -1. Run `spec-kitty agent feature check-prerequisites --json --paths-only` from the repository root and parse JSON for: - - `FEATURE_DIR` - Absolute path to feature directory (e.g., `/path/to/kitty-specs/017-my-feature/`) - - `FEATURE_SPEC` - Absolute path to spec.md file - - If command fails or JSON parsing fails, abort and instruct user to run `/spec-kitty.specify` first or verify they are in a spec-kitty-initialized repository. - -2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked). 
- - Functional Scope & Behavior: - - Core user goals & success criteria - - Explicit out-of-scope declarations - - User roles / personas differentiation - - Domain & Data Model: - - Entities, attributes, relationships - - Identity & uniqueness rules - - Lifecycle/state transitions - - Data volume / scale assumptions - - Interaction & UX Flow: - - Critical user journeys / sequences - - Error/empty/loading states - - Accessibility or localization notes - - Non-Functional Quality Attributes: - - Performance (latency, throughput targets) - - Scalability (horizontal/vertical, limits) - - Reliability & availability (uptime, recovery expectations) - - Observability (logging, metrics, tracing signals) - - Security & privacy (authN/Z, data protection, threat assumptions) - - Compliance / regulatory constraints (if any) - - Integration & External Dependencies: - - External services/APIs and failure modes - - Data import/export formats - - Protocol/versioning assumptions - - Edge Cases & Failure Handling: - - Negative scenarios - - Rate limiting / throttling - - Conflict resolution (e.g., concurrent edits) - - Constraints & Tradeoffs: - - Technical constraints (language, storage, hosting) - - Explicit tradeoffs or rejected alternatives - - Terminology & Consistency: - - Canonical glossary terms - - Avoided synonyms / deprecated terms - - Completion Signals: - - Acceptance criteria testability - - Measurable Definition of Done style indicators - - Misc / Placeholders: - - TODO markers / unresolved decisions - - Ambiguous adjectives ("robust", "intuitive") lacking quantification - - For each category with Partial or Missing status, add a candidate question opportunity unless: - - Clarification would not materially change implementation or validation strategy - - Information is better deferred to planning phase (note internally) - -3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. 
Apply these constraints: - - Maximum of 10 total questions across the whole session. - - Each question must be answerable with EITHER: - * A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR - * A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words"). - - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation. - - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved. - - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness). - - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests. - - Scale thoroughness to the feature’s complexity: a lightweight enhancement may only need one or two confirmations, while multi-system efforts warrant the full question budget if gaps remain critical. - - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic. - -4. Sequential questioning loop (interactive): - - Present EXACTLY ONE question at a time. - - For multiple-choice questions, list options inline using letter prefixes rather than tables, e.g. - `Options: (A) describe option A · (B) describe option B · (C) describe option C · (D) short custom answer (<=5 words)` - Ask the user to reply with the letter (or short custom text when offered). - - For short-answer style (no meaningful discrete options), output a single line after the question: `Format: Short answer (<=5 words)`. - - After the user answers: - * Validate the answer maps to one option or fits the <=5 word constraint. - * If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance). 
- * Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question. - - Stop asking further questions when: - * All critical ambiguities resolved early (remaining queued items become unnecessary), OR - * User signals completion ("done", "good", "no more"), OR - * You reach 5 asked questions. - - Never reveal future queued questions in advance. - - If no valid questions exist at start, immediately report no critical ambiguities. - -5. Integration after EACH accepted answer (incremental update approach): - - Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents. - - For the first integrated answer in this session: - * Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing). - * Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today. - - Append a bullet line immediately after acceptance: `- Q: → A: `. - - Then immediately apply the clarification to the most appropriate section(s): - * Functional ambiguity → Update or add a bullet in Functional Requirements. - * User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario. - * Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly. - * Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target). - * Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it). - * Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once. 
- - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text. - - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite). - - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact. - - Keep each inserted clarification minimal and testable (avoid narrative drift). - -6. Validation (performed after EACH write plus final pass): - - Clarifications session contains exactly one bullet per accepted answer (no duplicates). - - Total asked (accepted) questions ≤ 5. - - Updated sections contain no lingering vague placeholders the new answer was meant to resolve. - - No contradictory earlier statement remains (scan for now-invalid alternative choices removed). - - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`. - - Terminology consistency: same canonical term used across all updated sections. - -7. Write the updated spec back to `FEATURE_SPEC`. - -8. Report completion (after questioning loop ends or early termination): - - Number of questions asked & answered. - - Path to updated spec. - - Sections touched (list names). - - Coverage summary listing each taxonomy category with a status label (Resolved / Deferred / Clear / Outstanding). Present as plain text or bullet list, not a table. - - If any Outstanding or Deferred remain, recommend whether to proceed to `/spec-kitty.plan` or run `/spec-kitty.clarify` again later post-plan. - - Suggested next command. - -Behavior rules: -- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding. -- If spec file missing, instruct user to run `/spec-kitty.specify` first (do not create a new spec here). 
-- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions). -- Avoid speculative tech stack questions unless the absence blocks functional clarity. -- Respect user early termination signals ("stop", "done", "proceed"). - - If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing. - - If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale. - -Context for prioritization: User arguments from $ARGUMENTS section above (if provided). Use these to focus clarification on specific areas of concern mentioned by the user. diff --git a/.github/prompts/spec-kitty.constitution.prompt.md b/.github/prompts/spec-kitty.constitution.prompt.md deleted file mode 100644 index 6c79509b73..0000000000 --- a/.github/prompts/spec-kitty.constitution.prompt.md +++ /dev/null @@ -1,433 +0,0 @@ ---- -description: Create or update the project constitution through interactive phase-based discovery. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -*Path: [.kittify/templates/commands/constitution.md](.kittify/templates/commands/constitution.md)* - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - ---- - -## What This Command Does - -This command creates or updates the **project constitution** through an interactive, phase-based discovery workflow. - -**Location**: `.kittify/memory/constitution.md` (project root, not worktrees) -**Scope**: Project-wide principles that apply to ALL features - -**Important**: The constitution is OPTIONAL. All spec-kitty commands work without it. 
- -**Constitution Purpose**: -- Capture technical standards (languages, testing, deployment) -- Document code quality expectations (review process, quality gates) -- Record tribal knowledge (team conventions, lessons learned) -- Define governance (how the constitution changes, who enforces it) - ---- - -## Discovery Workflow - -This command uses a **4-phase discovery process**: - -1. **Phase 1: Technical Standards** (Recommended) - - Languages, frameworks, testing requirements - - Performance targets, deployment constraints - - ≈3-4 questions, creates a lean foundation - -2. **Phase 2: Code Quality** (Optional) - - PR requirements, review checklist, quality gates - - Documentation standards - - ≈3-4 questions - -3. **Phase 3: Tribal Knowledge** (Optional) - - Team conventions, lessons learned - - Historical decisions (optional) - - ≈2-4 questions - -4. **Phase 4: Governance** (Optional) - - Amendment process, compliance validation - - Exception handling (optional) - - ≈2-3 questions - -**Paths**: -- **Minimal** (≈1 page): Phase 1 only → ≈3-5 questions -- **Comprehensive** (≈2-3 pages): All phases → ≈8-12 questions - ---- - -## Execution Outline - -### Step 1: Initial Choice - -Ask the user: -``` -Do you want to establish a project constitution? - -A) No, skip it - I don't need a formal constitution -B) Yes, minimal - Core technical standards only (≈1 page, 3-5 questions) -C) Yes, comprehensive - Full governance and tribal knowledge (≈2-3 pages, 8-12 questions) -``` - -Handle responses: -- **A (Skip)**: Create a minimal placeholder at `.kittify/memory/constitution.md`: - - Title + short note: "Constitution skipped - not required for spec-kitty usage. Run /spec-kitty.constitution anytime to create one." - - Exit successfully. -- **B (Minimal)**: Continue with Phase 1 only. -- **C (Comprehensive)**: Continue through all phases, asking whether to skip each optional phase. 
- -### Step 2: Phase 1 - Technical Standards - -Context: -``` -Phase 1: Technical Standards -These are the non-negotiable technical requirements that all features must follow. -This phase is recommended for all projects. -``` - -Ask one question at a time: - -**Q1: Languages and Frameworks** -``` -What languages and frameworks are required for this project? -Examples: -- "Python 3.11+ with FastAPI for backend" -- "TypeScript 4.9+ with React 18 for frontend" -- "Rust 1.70+ with no external dependencies" -``` - -**Q2: Testing Requirements** -``` -What testing framework and coverage requirements? -Examples: -- "pytest with 80% line coverage, 100% for critical paths" -- "Jest with 90% coverage, unit + integration tests required" -- "cargo test, no specific coverage target but all features must have tests" -``` - -**Q3: Performance and Scale Targets** -``` -What are the performance and scale expectations? -Examples: -- "Handle 1000 requests/second at p95 < 200ms" -- "Support 10k concurrent users, 1M daily active users" -- "CLI operations complete in < 2 seconds" -- "N/A - performance not a primary concern" -``` - -**Q4: Deployment and Constraints** -``` -What are the deployment constraints or platform requirements? -Examples: -- "Docker-only, deployed to Kubernetes" -- "Must run on Ubuntu 20.04 LTS without external dependencies" -- "Cross-platform: Linux, macOS, Windows 10+" -- "N/A - no specific deployment constraints" -``` - -### Step 3: Phase 2 - Code Quality (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 2: Code Quality -Skip this if your team uses standard practices without special requirements. - -Do you want to define code quality standards? -A) Yes, ask questions -B) No, skip this phase (use standard practices) -``` - -If yes, ask one at a time: - -**Q5: PR Requirements** -``` -What are the requirements for pull requests? 
-Examples: -- "2 approvals required, 1 must be from core team" -- "1 approval required, PR must pass CI checks" -- "Self-merge allowed after CI passes for maintainers" -``` - -**Q6: Code Review Checklist** -``` -What should reviewers check during code review? -Examples: -- "Tests added, docstrings updated, follows PEP 8, no security issues" -- "Type annotations present, error handling robust, performance considered" -- "Standard review - correctness, clarity, maintainability" -``` - -**Q7: Quality Gates** -``` -What quality gates must pass before merging? -Examples: -- "All tests pass, coverage ≥80%, linter clean, security scan clean" -- "Tests pass, type checking passes, manual QA approved" -- "CI green, no merge conflicts, PR approved" -``` - -**Q8: Documentation Standards** -``` -What documentation is required? -Examples: -- "All public APIs must have docstrings + examples" -- "README updated for new features, ADRs for architectural decisions" -- "Inline comments for complex logic, keep docs up to date" -- "Minimal - code should be self-documenting" -``` - -### Step 4: Phase 3 - Tribal Knowledge (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 3: Tribal Knowledge -Skip this for new projects or if team conventions are minimal. - -Do you want to capture tribal knowledge? -A) Yes, ask questions -B) No, skip this phase -``` - -If yes, ask: - -**Q9: Team Conventions** -``` -What team conventions or coding styles should everyone follow? -Examples: -- "Use Result for fallible operations, never unwrap() in prod" -- "Prefer composition over inheritance, keep classes small (<200 lines)" -- "Use feature flags for gradual rollouts, never merge half-finished features" -``` - -**Q10: Lessons Learned** -``` -What past mistakes or lessons learned should guide future work? 
-Examples: -- "Always version APIs from day 1" -- "Write integration tests first" -- "Keep dependencies minimal - every dependency is a liability" -- "N/A - no major lessons yet" -``` - -Optional follow-up: -``` -Do you want to document historical architectural decisions? -A) Yes -B) No -``` - -**Q11: Historical Decisions** (only if yes) -``` -Any historical architectural decisions that should guide future work? -Examples: -- "Chose microservices for independent scaling" -- "Chose monorepo for atomic changes across services" -- "Chose SQLite for simplicity over PostgreSQL" -``` - -### Step 5: Phase 4 - Governance (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 4: Governance -Skip this to use simple defaults. - -Do you want to define governance process? -A) Yes, ask questions -B) No, skip this phase (use simple defaults) -``` - -If skipped, use defaults: -- Amendment: Any team member can propose changes via PR -- Compliance: Team validates during code review -- Exceptions: Discuss with team, document in PR - -If yes, ask: - -**Q12: Amendment Process** -``` -How should the constitution be amended? -Examples: -- "PR with 2 approvals, announce in team chat, 1 week discussion" -- "Any maintainer can update via PR" -- "Quarterly review, team votes on changes" -``` - -**Q13: Compliance Validation** -``` -Who validates that features comply with the constitution? -Examples: -- "Code reviewers check compliance, block merge if violated" -- "Team lead reviews architecture" -- "Self-managed - developers responsible" -``` - -Optional follow-up: -``` -Do you want to define exception handling? -A) Yes -B) No -``` - -**Q14: Exception Handling** (only if yes) -``` -How should exceptions to the constitution be handled? 
-Examples: -- "Document in ADR, require 3 approvals, set sunset date" -- "Case-by-case discussion, strong justification required" -- "Exceptions discouraged - update constitution instead" -``` - -### Step 6: Summary and Confirmation - -Present a summary and ask for confirmation: -``` -Constitution Summary -==================== - -You've completed [X] phases and answered [Y] questions. -Here's what will be written to .kittify/memory/constitution.md: - -Technical Standards: -- Languages: [Q1] -- Testing: [Q2] -- Performance: [Q3] -- Deployment: [Q4] - -[If Phase 2 completed] -Code Quality: -- PR Requirements: [Q5] -- Review Checklist: [Q6] -- Quality Gates: [Q7] -- Documentation: [Q8] - -[If Phase 3 completed] -Tribal Knowledge: -- Conventions: [Q9] -- Lessons Learned: [Q10] -- Historical Decisions: [Q11 if present] - -Governance: [Custom if Phase 4 completed, otherwise defaults] - -Estimated length: ≈[50-80 lines minimal] or ≈[150-200 lines comprehensive] - -Proceed with writing constitution? -A) Yes, write it -B) No, let me start over -C) Cancel, don't create constitution -``` - -Handle responses: -- **A**: Write the constitution file. -- **B**: Restart from Step 1. -- **C**: Exit without writing. - -### Step 7: Write Constitution File - -Generate the constitution as Markdown: - -```markdown -# [PROJECT_NAME] Constitution - -> Auto-generated by spec-kitty constitution command -> Created: [YYYY-MM-DD] -> Version: 1.0.0 - -## Purpose - -This constitution captures the technical standards, code quality expectations, -tribal knowledge, and governance rules for [PROJECT_NAME]. All features and -pull requests should align with these principles. 
- -## Technical Standards - -### Languages and Frameworks -[Q1] - -### Testing Requirements -[Q2] - -### Performance and Scale -[Q3] - -### Deployment and Constraints -[Q4] - -[If Phase 2 completed] -## Code Quality - -### Pull Request Requirements -[Q5] - -### Code Review Checklist -[Q6] - -### Quality Gates -[Q7] - -### Documentation Standards -[Q8] - -[If Phase 3 completed] -## Tribal Knowledge - -### Team Conventions -[Q9] - -### Lessons Learned -[Q10] - -[If Q11 present] -### Historical Decisions -[Q11] - -## Governance - -[If Phase 4 completed] -### Amendment Process -[Q12] - -### Compliance Validation -[Q13] - -[If Q14 present] -### Exception Handling -[Q14] - -[If Phase 4 skipped, use defaults] -### Amendment Process -Any team member can propose amendments via pull request. Changes are discussed -and merged following standard PR review process. - -### Compliance Validation -Code reviewers validate compliance during PR review. Constitution violations -should be flagged and addressed before merge. - -### Exception Handling -Exceptions discussed case-by-case with team. Strong justification required. -Consider updating constitution if exceptions become common. -``` - -### Step 8: Success Message - -After writing, provide: -- Location of the file -- Phases completed and questions answered -- Next steps (review, share with team, run /spec-kitty.specify) - ---- - -## Required Behaviors - -- Ask one question at a time. -- Offer skip options and explain when to skip. -- Keep responses concise and user-focused. -- Ensure the constitution stays lean (1-3 pages, not 10 pages). -- If user chooses to skip entirely, still create the minimal placeholder file and exit successfully. 
diff --git a/.github/prompts/spec-kitty.dashboard.prompt.md b/.github/prompts/spec-kitty.dashboard.prompt.md deleted file mode 100644 index af4eff346a..0000000000 --- a/.github/prompts/spec-kitty.dashboard.prompt.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Open the Spec Kitty dashboard in your browser. ---- - - -## Dashboard Access - -This command launches the Spec Kitty dashboard in your browser using the spec-kitty CLI. - -## What to do - -Simply run the `spec-kitty dashboard` command to: -- Start the dashboard if it's not already running -- Open it in your default web browser -- Display the dashboard URL - -If you need to stop the dashboard, you can use `spec-kitty dashboard --kill`. - -## Implementation - -Execute the following terminal command: - -```bash -spec-kitty dashboard -``` - -## Additional Options - -- To specify a preferred port: `spec-kitty dashboard --port 8080` -- To stop the dashboard: `spec-kitty dashboard --kill` - -## Success Criteria - -- User sees the dashboard URL clearly displayed -- Browser opens automatically to the dashboard -- If browser doesn't open, user gets clear instructions -- Error messages are helpful and actionable diff --git a/.github/prompts/spec-kitty.implement.prompt.md b/.github/prompts/spec-kitty.implement.prompt.md deleted file mode 100644 index cf59f9e163..0000000000 --- a/.github/prompts/spec-kitty.implement.prompt.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: Create an isolated workspace (worktree) for implementing a specific work package. ---- - - -## ⚠️ CRITICAL: Working Directory Requirement - -**After running `spec-kitty implement WP##`, you MUST:** - -1. **Run the cd command shown in the output** - e.g., `cd .worktrees/###-feature-WP##/` -2. **ALL file operations happen in this directory** - Read, Write, Edit tools must target files in the workspace -3. 
**NEVER write deliverable files to the main repository** - This is a critical workflow error - -**Why this matters:** -- Each WP has an isolated worktree with its own branch -- Changes in main repository will NOT be seen by reviewers looking at the WP worktree -- Writing to main instead of the workspace causes review failures and merge conflicts - ---- - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). - -**You MUST scroll to the BOTTOM** to see the completion command! - -Run this command to get the work package prompt and implementation instructions: - -```bash -spec-kitty agent workflow implement $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is implementing! - -If no WP ID is provided, it will automatically find the first work package with `lane: "planned"` and move it to "doing" for you. - ---- - -## Commit Workflow - -**BEFORE moving to for_review**, you MUST commit your implementation: - -```bash -cd .worktrees/###-feature-WP##/ -git add -A -git commit -m "feat(WP##): " -``` - -**Then move to review:** -```bash -spec-kitty agent tasks move-task WP## --to for_review --note "Ready for review: " -``` - -**Why this matters:** -- `move-task` validates that your worktree has commits beyond main -- Uncommitted changes will block the move to for_review -- This prevents lost work and ensures reviewers see complete implementations - ---- - -**The Python script handles all file updates automatically - no manual editing required!** - -**NOTE**: If `/spec-kitty.status` shows your WP in "doing" after you moved it to "for_review", don't panic - a reviewer may have moved it back (changes requested), or there's a sync delay. Focus on your WP. 
diff --git a/.github/prompts/spec-kitty.merge.prompt.md b/.github/prompts/spec-kitty.merge.prompt.md deleted file mode 100644 index 9f739a89b4..0000000000 --- a/.github/prompts/spec-kitty.merge.prompt.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -description: Merge a completed feature into the main branch and clean up worktree ---- - - -# /spec-kitty.merge - Merge Feature to Main - -**Version**: 0.11.0+ -**Purpose**: Merge ALL completed work packages for a feature into main branch. - -## CRITICAL: Workspace-per-WP Model (0.11.0) - -In 0.11.0, each work package has its own worktree: -- `.worktrees/###-feature-WP01/` -- `.worktrees/###-feature-WP02/` -- `.worktrees/###-feature-WP03/` - -**Merge merges ALL WP branches at once** (not incrementally one-by-one). - -## ⛔ Location Pre-flight Check (CRITICAL) - -**BEFORE PROCEEDING:** You MUST be in a feature worktree, NOT the main repository. - -Verify your current location: -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/###-feature-name-WP01` (or similar feature worktree) -- Branch: Should show your feature branch name like `###-feature-name-WP01` (NOT `main` or `release/*`) - -**If you see:** -- Branch showing `main` or `release/` -- OR pwd shows the main repository root - -⛔ **STOP - DANGER! You are in the wrong location!** - -**Correct the issue:** -1. Navigate to ANY worktree for this feature: `cd .worktrees/###-feature-name-WP01` -2. Verify you're on a feature branch: `git branch --show-current` -3. 
Then run this merge command again - -**Exception (main branch):** -If you are on `main` and need to merge a workspace-per-WP feature, run: -```bash -spec-kitty merge --feature -``` - ---- - -## Location Pre-flight Check (CRITICAL for AI Agents) - -Before merging, verify you are in the correct working directory by running this validation: - -```bash -python3 -c " -from specify_cli.guards import validate_worktree_location -result = validate_worktree_location() -if not result.is_valid: - print(result.format_error()) - print('\nThis command MUST run from a feature worktree, not the main repository.') - print('\nFor workspace-per-WP features, run from ANY WP worktree:') - print(' cd /path/to/project/.worktrees/-WP01') - print(' # or any other WP worktree for this feature') - raise SystemExit(1) -else: - print('✓ Location verified:', result.branch_name) -" -``` - -**What this validates**: -- Current branch follows the feature pattern like `001-feature-name` or `001-feature-name-WP01` -- You're not attempting to run from `main` or any release branch -- The validator prints clear navigation instructions if you're outside the feature worktree - -**For workspace-per-WP features (0.11.0+)**: -- Run merge from ANY WP worktree (e.g., `.worktrees/014-feature-WP09/`) -- The merge command automatically detects all WP branches and merges them sequentially -- You do NOT need to run merge from each WP worktree individually - -## Prerequisites - -Before running this command: - -1. ✅ All work packages must be in `done` lane (reviewed and approved) -2. ✅ Feature must pass `/spec-kitty.accept` checks -3. ✅ Working directory must be clean (no uncommitted changes in main) -4. ✅ **You must be in main repository root** (not in a worktree) - -## Command Syntax - -```bash -spec-kitty merge ###-feature-slug [OPTIONS] -``` - -**Example**: -```bash -cd /tmp/spec-kitty-test/test-project # Main repo root -spec-kitty merge 001-cli-hello-world -``` - -## What This Command Does - -1. 
**Detects** your current feature branch and worktree status -2. **Runs** pre-flight validation across all worktrees and the target branch -3. **Determines** merge order based on WP dependencies (workspace-per-WP) -4. **Forecasts** conflicts during `--dry-run` and flags auto-resolvable status files -5. **Verifies** working directory is clean (legacy single-worktree) -6. **Switches** to the target branch (default: `main`) -7. **Updates** the target branch (`git pull --ff-only`) -8. **Merges** the feature using your chosen strategy -9. **Auto-resolves** status file conflicts after each WP merge -10. **Optionally pushes** to origin -11. **Removes** the feature worktree (if in one) -12. **Deletes** the feature branch - -## Usage - -### Basic merge (default: merge commit, cleanup everything) - -```bash -spec-kitty merge -``` - -This will: -- Create a merge commit -- Remove the worktree -- Delete the feature branch -- Keep changes local (no push) - -### Merge with options - -```bash -# Squash all commits into one -spec-kitty merge --strategy squash - -# Push to origin after merging -spec-kitty merge --push - -# Keep the feature branch -spec-kitty merge --keep-branch - -# Keep the worktree -spec-kitty merge --keep-worktree - -# Merge into a different branch -spec-kitty merge --target develop - -# See what would happen without doing it -spec-kitty merge --dry-run - -# Run merge from main for a workspace-per-WP feature -spec-kitty merge --feature 017-feature-slug -``` - -### Common workflows - -```bash -# Feature complete, squash and push -spec-kitty merge --strategy squash --push - -# Keep branch for reference -spec-kitty merge --keep-branch - -# Merge into develop instead of main -spec-kitty merge --target develop --push -``` - -## Merge Strategies - -### `merge` (default) -Creates a merge commit preserving all feature branch commits. 
-```bash -spec-kitty merge --strategy merge -``` -✅ Preserves full commit history -✅ Clear feature boundaries in git log -❌ More commits in main branch - -### `squash` -Squashes all feature commits into a single commit. -```bash -spec-kitty merge --strategy squash -``` -✅ Clean, linear history on main -✅ Single commit per feature -❌ Loses individual commit details - -### `rebase` -Requires manual rebase first (command will guide you). -```bash -spec-kitty merge --strategy rebase -``` -✅ Linear history without merge commits -❌ Requires manual intervention -❌ Rewrites commit history - -## Options - -| Option | Description | Default | -|--------|-------------|---------| -| `--strategy` | Merge strategy: `merge`, `squash`, or `rebase` | `merge` | -| `--delete-branch` / `--keep-branch` | Delete feature branch after merge | delete | -| `--remove-worktree` / `--keep-worktree` | Remove feature worktree after merge | remove | -| `--push` | Push to origin after merge | no push | -| `--target` | Target branch to merge into | `main` | -| `--dry-run` | Show what would be done without executing | off | -| `--feature` | Feature slug when merging from main branch | none | -| `--resume` | Resume an interrupted merge | off | - -## Worktree Strategy - -Spec Kitty uses an **opinionated worktree approach**: - -### Workspace-per-WP Model (0.11.0+) - -In the current model, each work package gets its own worktree: - -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system-WP01/ # WP01 worktree -│ ├── 001-auth-system-WP02/ # WP02 worktree -│ ├── 001-auth-system-WP03/ # WP03 worktree -│ └── 002-dashboard-WP01/ # Different feature -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -**Merge behavior for workspace-per-WP**: -- Run `spec-kitty merge` from **any** WP worktree for the feature -- The command automatically detects all WP branches (WP01, WP02, WP03, etc.) 
-- Merges each WP branch into main in sequence -- Cleans up all WP worktrees and branches - -### Legacy Pattern (0.10.x) -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system/ # Feature 1 worktree (single) -│ ├── 002-dashboard/ # Feature 2 worktree (single) -│ └── 003-notifications/ # Feature 3 worktree (single) -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -### The Rules -1. **Main branch** stays in the primary repo root -2. **Feature branches** live in `.worktrees//` -3. **Work on features** happens in their worktrees (isolation) -4. **Merge from worktrees** using this command -5. **Cleanup is automatic** - worktrees removed after merge - -### Why Worktrees? -- ✅ Work on multiple features simultaneously -- ✅ Each feature has its own sandbox -- ✅ No branch switching in main repo -- ✅ Easy to compare features -- ✅ Clean separation of concerns - -### The Flow -``` -1. /spec-kitty.specify → Creates branch + worktree -2. cd .worktrees// → Enter worktree -3. /spec-kitty.plan → Work in isolation -4. /spec-kitty.tasks -5. /spec-kitty.implement -6. /spec-kitty.review -7. /spec-kitty.accept -8. /spec-kitty.merge → Merge + cleanup worktree -9. Back in main repo! → Ready for next feature -``` - -## Error Handling - -### "Already on main branch" -You're not on a feature branch. Switch to your feature branch first: -```bash -cd .worktrees/ -# or -git checkout -``` - -### "Working directory has uncommitted changes" -Commit or stash your changes: -```bash -git add . -git commit -m "Final changes" -# or -git stash -``` - -### "Could not fast-forward main" -Your main branch is behind origin: -```bash -git checkout main -git pull -git checkout -spec-kitty merge -``` - -### "Merge failed - conflicts" -Resolve conflicts manually: -```bash -# Fix conflicts in files -git add -git commit -# Then complete cleanup manually: -git worktree remove .worktrees/ -git branch -d -``` - -## Safety Features - -1. 
**Clean working directory check** - Won't merge with uncommitted changes -2. **Fast-forward only pull** - Won't proceed if main has diverged -3. **Graceful failure** - If merge fails, you can fix manually -4. **Optional operations** - Push, branch delete, and worktree removal are configurable -5. **Dry run mode** - Preview exactly what will happen - -## Examples - -### Complete feature and push -```bash -cd .worktrees/001-auth-system -/spec-kitty.accept -/spec-kitty.merge --push -``` - -### Squash merge for cleaner history -```bash -spec-kitty merge --strategy squash --push -``` - -### Merge but keep branch for reference -```bash -spec-kitty merge --keep-branch --push -``` - -### Check what will happen first -```bash -spec-kitty merge --dry-run -``` - -## After Merging - -After a successful merge, you're back on the main branch with: -- ✅ Feature code integrated -- ✅ Worktree removed (if it existed) -- ✅ Feature branch deleted (unless `--keep-branch`) -- ✅ Ready to start your next feature! - -## Integration with Accept - -The typical flow is: - -```bash -# 1. Run acceptance checks -/spec-kitty.accept --mode local - -# 2. If checks pass, merge -/spec-kitty.merge --push -``` - -Or combine conceptually: -```bash -# Accept verifies readiness -/spec-kitty.accept --mode local - -# Merge performs integration -/spec-kitty.merge --strategy squash --push -``` - -The `/spec-kitty.accept` command **verifies** your feature is complete. -The `/spec-kitty.merge` command **integrates** your feature into main. - -Together they complete the workflow: -``` -specify → plan → tasks → implement → review → accept → merge ✅ -``` diff --git a/.github/prompts/spec-kitty.plan.prompt.md b/.github/prompts/spec-kitty.plan.prompt.md deleted file mode 100644 index 36e2de1874..0000000000 --- a/.github/prompts/spec-kitty.plan.prompt.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -description: Execute the implementation planning workflow using the plan template to generate design artifacts. 
---- - - -# /spec-kitty.plan - Create Implementation Plan - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Plan works in the planning repository. NO worktrees created. - -```bash -# Run from project root (same directory as /spec-kitty.specify): -# You should already be here if you just ran /spec-kitty.specify - -# Creates: -# - kitty-specs/###-feature/plan.md → In planning repository -# - Commits to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -This command runs in the **planning repository**, not in a worktree. - -- Verify you're on the target branch (meta.json → target_branch) before scaffolding plan.md -- Planning artifacts live in `kitty-specs/###-feature/` -- The plan template is committed to the target branch after generation - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -## Planning Interrogation (mandatory) - -Before executing any scripts or generating artifacts you must interrogate the specification and stakeholders. 
- -- **Scope proportionality (CRITICAL)**: FIRST, assess the feature's complexity from the spec: - - **Trivial/Test Features** (hello world, simple static pages, basic demos): Ask 1-2 questions maximum about tech stack preference, then proceed with sensible defaults - - **Simple Features** (small components, minor API additions): Ask 2-3 questions about tech choices and constraints - - **Complex Features** (new subsystems, multi-component features): Ask 3-5 questions covering architecture, NFRs, integrations - - **Platform/Critical Features** (core infrastructure, security, payments): Full interrogation with 5+ questions - -- **User signals to reduce questioning**: If the user says "use defaults", "just make it simple", "skip to implementation", "vanilla HTML/CSS/JS" - recognize these as signals to minimize planning questions and use standard approaches. - -- **First response rule**: - - For TRIVIAL features: Ask ONE tech stack question, then if answer is simple (e.g., "vanilla HTML"), proceed directly to plan generation - - For other features: Ask a single architecture question and end with `WAITING_FOR_PLANNING_INPUT` - -- If the user has not provided plan context, keep interrogating with one question at a time. - -- **Conversational cadence**: After each reply, assess if you have SUFFICIENT context for this feature's scope. For trivial features, knowing the basic stack is enough. Only continue if critical unknowns remain. - -Planning requirements (scale to complexity): - -1. Maintain a **Planning Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for platform-level). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, standard practices are acceptable (vanilla HTML, simple file structure, no build tools). Only probe if the user's request suggests otherwise. -3. 
When you have sufficient context for the scope, summarize into an **Engineering Alignment** note and confirm. -4. If user explicitly asks to skip questions or use defaults, acknowledge and proceed with best practices for that feature type. - -## Outline - -1. **Check planning discovery status**: - - If any planning questions remain unanswered or the user has not confirmed the **Engineering Alignment** summary, stay in the one-question cadence, capture the user's response, update your internal table, and end with `WAITING_FOR_PLANNING_INPUT`. Do **not** surface the table. Do **not** run the setup command yet. - - Once every planning question has a concrete answer and the alignment summary is confirmed by the user, continue. - -2. **Detect feature context** (CRITICAL - prevents wrong feature selection): - - Before running any commands, detect which feature you're working on: - - a. **Check git branch name**: - - Run: `git rev-parse --abbrev-ref HEAD` - - If branch matches pattern `###-feature-name` or `###-feature-name-WP##`, extract the feature slug (strip `-WP##` suffix if present) - - Example: Branch `020-my-feature` or `020-my-feature-WP01` → Feature `020-my-feature` - - b. **Check current directory**: - - Look for `###-feature-name` pattern in the current path - - Examples: - - Inside `kitty-specs/020-my-feature/` → Feature `020-my-feature` - - Not in a worktree during planning (worktrees only used during implement): If detection runs from `.worktrees/020-my-feature-WP01/` → Feature `020-my-feature` - - c. **Prioritize features without plan.md** (if multiple exist): - - If multiple features exist and none detected from branch/path, list all features in `kitty-specs/` - - Prefer features that don't have `plan.md` yet (unplanned features) - - If ambiguous, ask the user which feature to plan - - d. 
**Extract feature slug**: - - Feature slug format: `###-feature-name` (e.g., `020-my-feature`) - - You MUST pass this explicitly to the setup-plan command using `--feature` flag - - **DO NOT** rely on auto-detection by the CLI (prevents wrong feature selection) - -3. **Setup**: Run `spec-kitty agent feature setup-plan --feature --json` from the repository root and parse JSON for: - - `result`: "success" or error message - - `plan_file`: Absolute path to the created plan.md - - `feature_dir`: Absolute path to the feature directory - - **Example**: - ```bash - # If detected feature is 020-my-feature: - spec-kitty agent feature setup-plan --feature 020-my-feature --json - ``` - - **Error handling**: If the command fails with "Cannot detect feature" or "Multiple features found", verify your feature detection logic in step 2 and ensure you're passing the correct feature slug. - -4. **Load context**: Read FEATURE_SPEC and `.kittify/memory/constitution.md` if it exists. If the constitution file is missing, skip Constitution Check and note that it is absent. Load IMPL_PLAN template (already copied). - -5. **Execute plan workflow**: Follow the structure in IMPL_PLAN template, using the validated planning answers as ground truth: - - Update Technical Context with explicit statements from the user or discovery research; mark `[NEEDS CLARIFICATION: …]` only when the user deliberately postpones a decision - - If a constitution exists, fill Constitution Check section from it and challenge any conflicts directly with the user. If no constitution exists, mark the section as skipped. 
- - Evaluate gates (ERROR if violations unjustified or questions remain unanswered) - - Phase 0: Generate research.md (commission research to resolve every outstanding clarification) - - Phase 1: Generate data-model.md, contracts/, quickstart.md based on confirmed intent - - Phase 1: Update agent context by running the agent script - - Re-evaluate Constitution Check post-design, asking the user to resolve new gaps before proceeding - -6. **STOP and report**: This command ends after Phase 1 planning. Report branch, IMPL_PLAN path, and generated artifacts. - - **⚠️ CRITICAL: DO NOT proceed to task generation!** The user must explicitly run `/spec-kitty.tasks` to generate work packages. Your job is COMPLETE after reporting the planning artifacts. - -## Phases - -### Phase 0: Outline & Research - -1. **Extract unknowns from Technical Context** above: - - For each NEEDS CLARIFICATION → research task - - For each dependency → best practices task - - For each integration → patterns task - -2. **Generate and dispatch research agents**: - ``` - For each unknown in Technical Context: - Task: "Research {unknown} for {feature context}" - For each technology choice: - Task: "Find best practices for {tech} in {domain}" - ``` - -3. **Consolidate findings** in `research.md` using format: - - Decision: [what was chosen] - - Rationale: [why chosen] - - Alternatives considered: [what else evaluated] - -**Output**: research.md with all NEEDS CLARIFICATION resolved - -### Phase 1: Design & Contracts - -**Prerequisites:** `research.md` complete - -1. **Extract entities from feature spec** → `data-model.md`: - - Entity name, fields, relationships - - Validation rules from requirements - - State transitions if applicable - -2. **Generate API contracts** from functional requirements: - - For each user action → endpoint - - Use standard REST/GraphQL patterns - - Output OpenAPI/GraphQL schema to `/contracts/` - -3. 
**Agent context update**: - - Run `` - - These scripts detect which AI agent is in use - - Update the appropriate agent-specific context file - - Add only new technology from current plan - - Preserve manual additions between markers - -**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file - -## Key rules - -- Use absolute paths -- ERROR on gate failures or unresolved clarifications - ---- - -## ⛔ MANDATORY STOP POINT - -**This command is COMPLETE after generating planning artifacts.** - -After reporting: -- `plan.md` path -- `research.md` path (if generated) -- `data-model.md` path (if generated) -- `contracts/` contents (if generated) -- Agent context file updated - -**YOU MUST STOP HERE.** - -Do NOT: -- ❌ Generate `tasks.md` -- ❌ Create work package (WP) files -- ❌ Create `tasks/` subdirectories -- ❌ Proceed to implementation - -The user will run `/spec-kitty.tasks` when they are ready to generate work packages. - -**Next suggested command**: `/spec-kitty.tasks` (user must invoke this explicitly) diff --git a/.github/prompts/spec-kitty.research.prompt.md b/.github/prompts/spec-kitty.research.prompt.md deleted file mode 100644 index b6bdff8ea7..0000000000 --- a/.github/prompts/spec-kitty.research.prompt.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Run the Phase 0 research workflow to scaffold research artifacts before task planning. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - - -*Path: [.kittify/templates/commands/research.md](.kittify/templates/commands/research.md)* - - -## Location Pre-flight Check - -**BEFORE PROCEEDING:** Verify you are working in the feature worktree. 
- -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/001-feature-name` (or similar feature worktree) -- Branch: Should show your feature branch name like `001-feature-name` (NOT `main`) - -**If you see the main branch or main repository path:** - -⛔ **STOP - You are in the wrong location!** - -This command creates research artifacts in your feature directory. You must be in the feature worktree. - -**Correct the issue:** -1. Navigate to your feature worktree: `cd .worktrees/001-feature-name` -2. Verify you're on the correct feature branch: `git branch --show-current` -3. Then run this research command again - ---- - -## What This Command Creates - -When you run `spec-kitty research`, the following files are generated in your feature directory: - -**Generated files**: -- **research.md** – Decisions, rationale, and supporting evidence -- **data-model.md** – Entities, attributes, and relationships -- **research/evidence-log.csv** – Sources and findings audit trail -- **research/source-register.csv** – Reference tracking for all sources - -**Location**: All files go in `kitty-specs/001-feature-name/` - ---- - -## Workflow Context - -**Before this**: `/spec-kitty.plan` calls this as "Phase 0" research phase - -**This command**: -- Scaffolds research artifacts -- Creates templates for capturing decisions and evidence -- Establishes audit trail for traceability - -**After this**: -- Fill in research.md, data-model.md, and CSV logs with actual findings -- Continue with `/spec-kitty.plan` which uses your research to drive technical design - ---- - -## Goal - -Create `research.md`, `data-model.md`, and supporting CSV stubs based on the active mission so implementation planning can reference concrete decisions and evidence. - -## What to do - -1. You should already be in the correct feature worktree (verified above with pre-flight check). -2. Run `spec-kitty research` to generate the mission-specific research artifacts. 
(Add `--force` only when it is acceptable to overwrite existing drafts.) -3. Open the generated files and fill in the required content: - - `research.md` – capture decisions, rationale, and supporting evidence. - - `data-model.md` – document entities, attributes, and relationships discovered during research. - - `research/evidence-log.csv` & `research/source-register.csv` – log all sources and findings so downstream reviewers can audit the trail. -4. If your research generates additional templates (spreadsheets, notebooks, etc.), store them under `research/` and reference them inside `research.md`. -5. Summarize open questions or risks at the bottom of `research.md`. These should feed directly into `/spec-kitty.tasks` and future implementation prompts. - -## Success Criteria - -- `kitty-specs//research.md` explains every major decision with references to evidence. -- `kitty-specs//data-model.md` lists the entities and relationships needed for implementation. -- CSV logs exist (even if partially filled) so evidence gathering is traceable. -- Outstanding questions from the research phase are tracked and ready for follow-up during planning or execution. diff --git a/.github/prompts/spec-kitty.review.prompt.md b/.github/prompts/spec-kitty.review.prompt.md deleted file mode 100644 index fde47891fc..0000000000 --- a/.github/prompts/spec-kitty.review.prompt.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Perform structured code review and kanban transitions for completed task prompt files ---- - - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). - -**You MUST scroll to the BOTTOM** to see the completion commands! - -Run this command to get the work package prompt and review instructions: - -```bash -spec-kitty agent workflow review $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is reviewing! 
- -If no WP ID is provided, it will automatically find the first work package with `lane: "for_review"` and move it to "doing" for you. - -## Dependency checks (required) - -- dependency_check: If the WP frontmatter lists `dependencies`, confirm each dependency WP is merged to main before you review this WP. -- dependent_check: Identify any WPs that list this WP as a dependency and note their current lanes. -- rebase_warning: If you request changes AND any dependents exist, warn those agents to rebase and provide a concrete command (example: `cd .worktrees/FEATURE-WP02 && git rebase FEATURE-WP01`). -- verify_instruction: Confirm dependency declarations match actual code coupling (imports, shared modules, API contracts). - -**After reviewing, scroll to the bottom and run ONE of these commands**: -- ✅ Approve: `spec-kitty agent tasks move-task WP## --to done --note "Review passed: "` -- ❌ Reject: Write feedback to the temp file path shown in the prompt, then run `spec-kitty agent tasks move-task WP## --to planned --review-feedback-file ` - -**The prompt will provide a unique temp file path for feedback - use that exact path to avoid conflicts with other agents!** - -**The Python script handles all file updates automatically - no manual editing required!** diff --git a/.github/prompts/spec-kitty.specify.prompt.md b/.github/prompts/spec-kitty.specify.prompt.md deleted file mode 100644 index cc2735849c..0000000000 --- a/.github/prompts/spec-kitty.specify.prompt.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -description: Create or update the feature specification from a natural language feature description. ---- - - -# /spec-kitty.specify - Create Feature Specification - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Specify works in the planning repository. NO worktrees are created. 
- -```bash -# Run from project root: -cd /path/to/project/root # Your planning repository - -# All planning artifacts are created in the planning repo and committed: -# - kitty-specs/###-feature/spec.md → Created in planning repo -# - Committed to target branch (meta.json → target_branch) -# - NO worktrees created -``` - -**Worktrees are created later** during `/spec-kitty.implement`, not during planning. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery Gate (mandatory) - -Before running any scripts or writing to disk you **must** conduct a structured discovery interview. - -- **Scope proportionality (CRITICAL)**: FIRST, gauge the inherent complexity of the request: - - **Trivial/Test Features** (hello world, simple pages, proof-of-concept): Ask 1-2 questions maximum, then proceed. Examples: "a simple hello world page", "tic-tac-toe game", "basic contact form" - - **Simple Features** (small UI additions, minor enhancements): Ask 2-3 questions covering purpose and basic constraints - - **Complex Features** (new subsystems, integrations): Ask 3-5 questions covering goals, users, constraints, risks - - **Platform/Critical Features** (authentication, payments, infrastructure): Full discovery with 5+ questions - -- **User signals to reduce questioning**: If the user says "just testing", "quick prototype", "skip to next phase", "stop asking questions" - recognize this as a signal to minimize discovery and proceed with reasonable defaults. - -- **First response rule**: - - For TRIVIAL features (hello world, simple test): Ask ONE clarifying question, then if the answer confirms it's simple, proceed directly to spec generation - - For other features: Ask a single focused discovery question and end with `WAITING_FOR_DISCOVERY_INPUT` - -- If the user provides no initial description (empty command), stay in **Interactive Interview Mode**: keep probing with one question at a time. 
- -- **Conversational cadence**: After each user reply, decide if you have ENOUGH context for this feature's complexity level. For trivial features, 1-2 questions is sufficient. Only continue asking if truly necessary for the scope. - -Discovery requirements (scale to feature complexity): - -1. Maintain a **Discovery Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for complex). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, reasonable defaults are acceptable. Only probe if truly ambiguous. -3. When you have sufficient context for the feature's scope, paraphrase into an **Intent Summary** and confirm. For trivial features, this can be very brief. -4. If user explicitly asks to skip questions or says "just testing", acknowledge and proceed with minimal discovery. - -## Mission Selection - -After completing discovery and confirming the Intent Summary, determine the appropriate mission for this feature. - -### Available Missions - -- **software-dev**: For building software features, APIs, CLI tools, applications - - Phases: research → design → implement → test → review - - Best for: code changes, new features, bug fixes, refactoring - -- **research**: For investigations, literature reviews, technical analysis - - Phases: question → methodology → gather → analyze → synthesize → publish - - Best for: feasibility studies, market research, technology evaluation - -### Mission Inference - -1. **Analyze the feature description** to identify the primary goal: - - Building, coding, implementing, creating software → **software-dev** - - Researching, investigating, analyzing, evaluating → **research** - -2. 
**Check for explicit mission requests** in the user's description: - - If user mentions "research project", "investigation", "analysis" → use research - - If user mentions "build", "implement", "create feature" → use software-dev - -3. **Confirm with user** (unless explicit): - > "Based on your description, this sounds like a **[software-dev/research]** project. - > I'll use the **[mission name]** mission. Does that work for you?" - -4. **Handle user response**: - - If confirmed: proceed with selected mission - - If user wants different mission: use their choice - -5. **Handle --mission flag**: If the user provides `--mission ` in their command, skip inference and use the specified mission directly. - -Store the final mission selection in your notes and include it in the spec output. Do not pass a `--mission` flag to feature creation. - -## Workflow (0.11.0+) - -**Planning happens in the planning repository - NO worktree created!** - -1. Creates `kitty-specs/###-feature/spec.md` directly in planning repo -2. Automatically commits to target branch -3. No worktree created during specify - -**Worktrees created later**: Use `spec-kitty implement WP##` to create a workspace for each work package. Worktrees are created later during implement (e.g., `.worktrees/###-feature-WP##`). - -## Location - -- Work in: **Planning repository** (not a worktree) -- Creates: `kitty-specs/###-feature/spec.md` -- Commits to: target branch (`meta.json` → `target_branch`) - -## Outline - -### 0. Generate a Friendly Feature Title - -- Summarize the agreed intent into a short, descriptive title (aim for ≤7 words; avoid filler like "feature" or "thing"). -- Read that title back during the Intent Summary and revise it if the user requests changes. -- Use the confirmed title to derive the kebab-case feature slug for the create-feature command. - -The text the user typed after `/spec-kitty.specify` in the triggering message **is** the initial feature description. 
Capture it verbatim, but treat it only as a starting point for discovery—not the final truth. Your job is to interrogate the request, surface gaps, and co-create a complete specification with the user. - -Given that feature description, do this: - -- **Generation Mode (arguments provided)**: Use the provided text as a starting point, validate it through discovery, and fill gaps with explicit questions or clearly documented assumptions (limit `[NEEDS CLARIFICATION: …]` to at most three critical decisions the user has postponed). -- **Interactive Interview Mode (no arguments)**: Use the discovery interview to elicit all necessary context, synthesize the working feature description, and confirm it with the user before you generate any specification artifacts. - -1. **Check discovery status**: - - If this is your first message or discovery questions remain unanswered, stay in the one-question loop, capture the user's response, update your internal table, and end with `WAITING_FOR_DISCOVERY_INPUT`. Do **not** surface the table; keep it internal. Do **not** call the creation command yet. - - Only proceed once every discovery question has an explicit answer and the user has acknowledged the Intent Summary. - - Empty invocation rule: stay in interview mode until you can restate the agreed-upon feature description. Do **not** call the creation command while the description is missing or provisional. - -2. When discovery is complete and the intent summary, **title**, and **mission** are confirmed, run the feature creation command from repo root: - - ```bash - spec-kitty agent feature create-feature "" --json - ``` - - Where `` is a kebab-case version of the friendly title (e.g., "Checkout Upsell Flow" → "checkout-upsell-flow"). 
- - The command returns JSON with: - - `result`: "success" or error message - - `feature`: Feature number and slug (e.g., "014-checkout-upsell-flow") - - `feature_dir`: Absolute path to the feature directory inside the main repo - - Parse these values for use in subsequent steps. All file paths are absolute. - - **IMPORTANT**: You must only ever run this command once. The JSON is provided in the terminal output - always refer to it to get the actual paths you're looking for. -3. **Stay in the main repository**: No worktree is created during specify. - -4. The spec template is bundled with spec-kitty at `src/specify_cli/missions/software-dev/.kittify/templates/spec-template.md`. The template defines required sections for software development features. - -5. Create meta.json in the feature directory with: - ```json - { - "feature_number": "", - "slug": "", - "friendly_name": "", - "mission": "", - "source_description": "$ARGUMENTS", - "created_at": "", - "target_branch": "main", - "vcs": "git" - } - ``` - - **CRITICAL**: Always set these fields explicitly: - - `target_branch`: Set to "main" by default (user can change to "2.x" for dual-branch features) - - `vcs`: Set to "git" by default (enables VCS locking and prevents jj fallback) - -6. 
Generate the specification content by following this flow: - - Use the discovery answers as your authoritative source of truth (do **not** rely on raw `$ARGUMENTS`) - - For empty invocations, treat the synthesized interview summary as the canonical feature description - - Identify: actors, actions, data, constraints, motivations, success metrics - - For any remaining ambiguity: - * Ask the user a focused follow-up question immediately and halt work until they answer - * Only use `[NEEDS CLARIFICATION: …]` when the user explicitly defers the decision - * Record any interim assumption in the Assumptions section - * Prioritize clarifications by impact: scope > outcomes > risks/security > user experience > technical details - - Fill User Scenarios & Testing section (ERROR if no clear user flow can be determined) - - Generate Functional Requirements (each requirement must be testable) - - Define Success Criteria (measurable, technology-agnostic outcomes) - - Identify Key Entities (if data involved) - -7. Write the specification to `/spec.md` using the template structure, replacing placeholders with concrete details derived from the feature description while preserving section order and headings. - -8. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria: - - a. 
**Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items: - - ```markdown - # Specification Quality Checklist: [FEATURE NAME] - - **Purpose**: Validate specification completeness and quality before proceeding to planning - **Created**: [DATE] - **Feature**: [Link to spec.md] - - ## Content Quality - - - [ ] No implementation details (languages, frameworks, APIs) - - [ ] Focused on user value and business needs - - [ ] Written for non-technical stakeholders - - [ ] All mandatory sections completed - - ## Requirement Completeness - - - [ ] No [NEEDS CLARIFICATION] markers remain - - [ ] Requirements are testable and unambiguous - - [ ] Success criteria are measurable - - [ ] Success criteria are technology-agnostic (no implementation details) - - [ ] All acceptance scenarios are defined - - [ ] Edge cases are identified - - [ ] Scope is clearly bounded - - [ ] Dependencies and assumptions identified - - ## Feature Readiness - - - [ ] All functional requirements have clear acceptance criteria - - [ ] User scenarios cover primary flows - - [ ] Feature meets measurable outcomes defined in Success Criteria - - [ ] No implementation details leak into specification - - ## Notes - - - Items marked incomplete require spec updates before `/spec-kitty.clarify` or `/spec-kitty.plan` - ``` - - b. **Run Validation Check**: Review the spec against each checklist item: - - For each item, determine if it passes or fails - - Document specific issues found (quote relevant spec sections) - - c. **Handle Validation Results**: - - - **If all items pass**: Mark checklist complete and proceed to step 9 - - - **If items fail (excluding [NEEDS CLARIFICATION])**: - 1. List the failing items and specific issues - 2. Update the spec to address each issue - 3. Re-run validation until all items pass (max 3 iterations) - 4. 
If still failing after 3 iterations, document remaining issues in checklist notes and warn user - - - **If [NEEDS CLARIFICATION] markers remain**: - 1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec - 2. Re-confirm with the user whether each outstanding decision truly needs to stay unresolved. Do not assume away critical gaps. - 3. For each clarification the user has explicitly deferred, present options using plain text—no tables: - - ``` - Question [N]: [Topic] - Context: [Quote relevant spec section] - Need: [Specific question from NEEDS CLARIFICATION marker] - Options: (A) [First answer — implications] · (B) [Second answer — implications] · (C) [Third answer — implications] · (D) Custom (describe your own answer) - Reply with a letter or a custom answer. - ``` - - 4. Number questions sequentially (Q1, Q2, Q3 - max 3 total) - 5. Present all questions together before waiting for responses - 6. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B") - 7. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer - 8. Re-run validation after all clarifications are resolved - - d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status - -9. Report completion with feature directory, spec file path, checklist results, and readiness for the next phase (`/spec-kitty.clarify` or `/spec-kitty.plan`). - -**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing. - -## General Guidelines - -## Quick Guidelines - -- Focus on **WHAT** users need and **WHY**. -- Avoid HOW to implement (no tech stack, APIs, code structure). -- Written for business stakeholders, not developers. -- DO NOT create any checklists that are embedded in the spec. That will be a separate command. 
- -### Section Requirements - -- **Mandatory sections**: Must be completed for every feature -- **Optional sections**: Include only when relevant to the feature -- When a section doesn't apply, remove it entirely (don't leave as "N/A") - -### For AI Generation - -When creating this spec from a user prompt: - -1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps -2. **Document assumptions**: Record reasonable defaults in the Assumptions section -3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that: - - Significantly impact feature scope or user experience - - Have multiple reasonable interpretations with different implications - - Lack any reasonable default -4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details -5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item -6. **Common areas needing clarification** (only if no reasonable default exists): - - Feature scope and boundaries (include/exclude specific use cases) - - User types and permissions (if multiple conflicting interpretations possible) - - Security/compliance requirements (when legally/financially significant) - -**Examples of reasonable defaults** (don't ask about these): - -- Data retention: Industry-standard practices for the domain -- Performance targets: Standard web/mobile app expectations unless specified -- Error handling: User-friendly messages with appropriate fallbacks -- Authentication method: Standard session-based or OAuth2 for web apps -- Integration patterns: RESTful APIs unless specified otherwise - -### Success Criteria Guidelines - -Success criteria must be: - -1. **Measurable**: Include specific metrics (time, percentage, count, rate) -2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools -3. 
**User-focused**: Describe outcomes from user/business perspective, not system internals -4. **Verifiable**: Can be tested/validated without knowing implementation details - -**Good examples**: - -- "Users can complete checkout in under 3 minutes" -- "System supports 10,000 concurrent users" -- "95% of searches return results in under 1 second" -- "Task completion rate improves by 40%" - -**Bad examples** (implementation-focused): - -- "API response time is under 200ms" (too technical, use "Users see results instantly") -- "Database can handle 1000 TPS" (implementation detail, use user-facing metric) -- "React components render efficiently" (framework-specific) -- "Redis cache hit rate above 80%" (technology-specific) diff --git a/.github/prompts/spec-kitty.status.prompt.md b/.github/prompts/spec-kitty.status.prompt.md deleted file mode 100644 index 8776b1ca64..0000000000 --- a/.github/prompts/spec-kitty.status.prompt.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: Display kanban board status showing work package progress across lanes (planned/doing/for_review/done). ---- - - -## Status Board - -Show the current status of all work packages in the active feature. 
This displays: -- Kanban board with WPs organized by lane -- Progress bar showing completion percentage -- Parallelization opportunities (which WPs can run concurrently) -- Next steps recommendations - -## When to Use - -- Before starting work (see what's ready to implement) -- During implementation (track overall progress) -- After completing a WP (see what's next) -- When planning parallelization (identify independent WPs) - -## Implementation - -Run the CLI command to display the status board: - -```bash -spec-kitty agent tasks status -``` - -To specify a feature explicitly: - -```bash -spec-kitty agent tasks status --feature 012-documentation-mission -``` - -The command displays a rich kanban board with: -- Progress bar showing completion percentage -- Work packages organized by lane (planned/doing/for_review/done) -- Summary metrics - -## Alternative: Python API - -For programmatic access (e.g., in Jupyter notebooks or scripts), use the Python function: - -```python -from specify_cli.agent_utils.status import show_kanban_status - -# Auto-detect feature from current directory/branch -result = show_kanban_status() - -# Or specify feature explicitly: -# result = show_kanban_status("012-documentation-mission") -``` - -Returns structured data: - -```python -{ - 'feature_slug': '012-documentation-mission', - 'progress_percentage': 80.0, - 'done_count': 8, - 'total_wps': 10, - 'by_lane': { - 'planned': ['WP09'], - 'doing': ['WP10'], - 'for_review': [], - 'done': ['WP01', 'WP02', ...] - }, - 'parallelization': { - 'ready_wps': [...], - 'can_parallelize': True/False, - 'parallel_groups': [...] 
- } -} - -## Output Example - -``` -╭─────────────────────────────────────────────────────────────────────╮ -│ 012-documentation-mission │ -│ Progress: 80% [████████░░] │ -╰─────────────────────────────────────────────────────────────────────╯ - -┌─────────────┬─────────────┬─────────────┬─────────────┐ -│ PLANNED │ DOING │ FOR_REVIEW │ DONE │ -├─────────────┼─────────────┼─────────────┼─────────────┤ -│ WP09 │ WP10 │ │ WP01 │ -│ │ │ │ WP02 │ -│ │ │ │ WP03 │ -│ │ │ │ ... │ -└─────────────┴─────────────┴─────────────┴─────────────┘ - -🔀 Parallelization: WP09 can start (no dependencies) -``` diff --git a/.github/prompts/spec-kitty.tasks.prompt.md b/.github/prompts/spec-kitty.tasks.prompt.md deleted file mode 100644 index e170ee580e..0000000000 --- a/.github/prompts/spec-kitty.tasks.prompt.md +++ /dev/null @@ -1,577 +0,0 @@ ---- -description: Generate grouped work packages with actionable subtasks and matching prompt files for the feature in one pass. ---- - - -# /spec-kitty.tasks - Generate Work Packages - -**Version**: 0.11.0+ - -## ⚠️ CRITICAL: THIS IS THE MOST IMPORTANT PLANNING WORK - -**You are creating the blueprint for implementation**. The quality of work packages determines: -- How easily agents can implement the feature -- How parallelizable the work is -- How reviewable the code will be -- Whether the feature succeeds or fails - -**QUALITY OVER SPEED**: This is NOT the time to save tokens or rush. Take your time to: -- Understand the full scope deeply -- Break work into clear, manageable pieces -- Write detailed, actionable guidance -- Think through risks and edge cases - -**Token usage is EXPECTED and GOOD here**. A thorough task breakdown saves 10x the effort during implementation. Do not cut corners. - ---- - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Tasks works in the planning repository. NO worktrees created. 
- -```bash -# Run from project root (same directory as /spec-kitty.plan): -# You should already be here if you just ran /spec-kitty.plan - -# Creates: -# - kitty-specs/###-feature/tasks/WP01-*.md → In planning repository -# - kitty-specs/###-feature/tasks/WP02-*.md → In planning repository -# - Commits ALL to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -**Worktrees created later**: After tasks are generated, use `spec-kitty implement WP##` to create workspace for each WP. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -Before proceeding, verify you are in the planning repository: - -**Check your current branch:** -```bash -git branch --show-current -``` - -**Expected output:** the target branch (meta.json → target_branch), typically `main` or `2.x` -**If you see a feature branch:** You're in the wrong place. Return to the target branch: -```bash -cd $(git rev-parse --show-toplevel) -git checkout -``` - -Work packages are generated directly in `kitty-specs/###-feature/` and committed to the target branch. Worktrees are created later when implementing each work package. - -## Outline - -1. **Setup**: Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` from the repository root and capture `FEATURE_DIR` plus `AVAILABLE_DOCS`. All paths must be absolute. - - **CRITICAL**: The command returns JSON with `FEATURE_DIR` as an ABSOLUTE path (e.g., `/Users/robert/Code/new_specify/kitty-specs/001-feature-name`). - - **YOU MUST USE THIS PATH** for ALL subsequent file operations. 
Example: - ``` - FEATURE_DIR = "/Users/robert/Code/new_specify/kitty-specs/001-a-simple-hello" - tasks.md location: FEATURE_DIR + "/tasks.md" - prompt location: FEATURE_DIR + "/tasks/WP01-slug.md" - ``` - - **DO NOT CREATE** paths like: - - ❌ `tasks/WP01-slug.md` (missing FEATURE_DIR prefix) - - ❌ `/tasks/WP01-slug.md` (wrong root) - - ❌ `FEATURE_DIR/tasks/planned/WP01-slug.md` (WRONG - no subdirectories!) - - ❌ `WP01-slug.md` (wrong directory) - -2. **Load design documents** from `FEATURE_DIR` (only those present): - - **Required**: plan.md (tech architecture, stack), spec.md (user stories & priorities) - - **Optional**: data-model.md (entities), contracts/ (API schemas), research.md (decisions), quickstart.md (validation scenarios) - - Scale your effort to the feature: simple UI tweaks deserve lighter coverage, multi-system releases require deeper decomposition. - -3. **Derive fine-grained subtasks** (IDs `T001`, `T002`, ...): - - Parse plan/spec to enumerate concrete implementation steps, tests (only if explicitly requested), migrations, and operational work. - - Capture prerequisites, dependencies, and parallelizability markers (`[P]` means safe to parallelize per file/concern). - - Maintain the subtask list internally; it feeds the work-package roll-up and the prompts. - -4. 
**Roll subtasks into work packages** (IDs `WP01`, `WP02`, ...): - - **IDEAL WORK PACKAGE SIZE** (most important guideline): - - **Target: 3-7 subtasks per WP** (results in 200-500 line prompts) - - **Maximum: 10 subtasks per WP** (results in ~700 line prompts) - - **If more than 10 subtasks needed**: Create additional WPs, don't pack them in - - **WHY SIZE MATTERS**: - - **Too large** (>10 subtasks, >700 lines): Agents get overwhelmed, skip details, make mistakes - - **Too small** (<3 subtasks, <150 lines): Overhead of worktree creation not worth it - - **Just right** (3-7 subtasks, 200-500 lines): Agent can hold entire context, implements thoroughly - - **NUMBER OF WPs**: Let the work dictate the count - - Simple feature (5-10 subtasks total): 2-3 WPs - - Medium feature (20-40 subtasks): 5-8 WPs - - Complex feature (50+ subtasks): 10-20 WPs ← **This is OK!** - - **Better to have 20 focused WPs than 5 overwhelming WPs** - - **GROUPING PRINCIPLES**: - - Each WP should be independently implementable - - Root in a single user story or cohesive subsystem - - Ensure every subtask appears in exactly one work package - - Name with succinct goal (e.g., "User Story 1 – Real-time chat happy path") - - Record metadata: priority, success criteria, risks, dependencies, included subtasks - -5. **Write `tasks.md`** using the bundled tasks template (`src/specify_cli/missions/software-dev/.kittify/templates/tasks-template.md`): - - **Location**: Write to `FEATURE_DIR/tasks.md` (use the absolute FEATURE_DIR path from step 1) - - Populate the Work Package sections (setup, foundational, per-story, polish) with the `WPxx` entries - - Under each work package include: - - Summary (goal, priority, independent test) - - Included subtasks (checkbox list referencing `Txxx`) - - Implementation sketch (high-level sequence) - - Parallel opportunities, dependencies, and risks - - Preserve the checklist style so implementers can mark progress - -6. 
**Generate prompt files (one per work package)**: - - **CRITICAL PATH RULE**: All work package files MUST be created in a FLAT `FEATURE_DIR/tasks/` directory, NOT in subdirectories! - - Correct structure: `FEATURE_DIR/tasks/WPxx-slug.md` (flat, no subdirectories) - - WRONG (do not create): `FEATURE_DIR/tasks/planned/`, `FEATURE_DIR/tasks/doing/`, or ANY lane subdirectories - - WRONG (do not create): `/tasks/`, `tasks/`, or any path not under FEATURE_DIR - - Ensure `FEATURE_DIR/tasks/` exists (create as flat directory, NO subdirectories) - - For each work package: - - Derive a kebab-case slug from the title; filename: `WPxx-slug.md` - - Full path example: `FEATURE_DIR/tasks/WP01-create-html-page.md` (use ABSOLUTE path from FEATURE_DIR variable) - - Use the bundled task prompt template (`src/specify_cli/missions/software-dev/.kittify/templates/task-prompt-template.md`) to capture: - - Frontmatter with `work_package_id`, `subtasks` array, `lane: "planned"`, `dependencies`, history entry - - Objective, context, detailed guidance per subtask - - Test strategy (only if requested) - - Definition of Done, risks, reviewer guidance - - Update `tasks.md` to reference the prompt filename - - **TARGET PROMPT SIZE**: 200-500 lines per WP (results from 3-7 subtasks) - - **MAXIMUM PROMPT SIZE**: 700 lines per WP (10 subtasks max) - - **If prompts are >700 lines**: Split the WP - it's too large - - **IMPORTANT**: All WP files live in flat `tasks/` directory. Lane status is tracked ONLY in the `lane:` frontmatter field, NOT by directory location. Agents can change lanes by editing the `lane:` field directly or using `spec-kitty agent tasks move-task`. - -7. 
**Finalize tasks with dependency parsing and commit**: - After generating all WP prompt files, run the finalization command to: - - Parse dependencies from tasks.md - - Update WP frontmatter with dependencies field - - Validate dependencies (check for cycles, invalid references) - - Commit all tasks to target branch - - **CRITICAL**: Run this command from repo root: - ```bash - spec-kitty agent feature finalize-tasks --json - ``` - - This step is MANDATORY for workspace-per-WP features. Without it: - - Dependencies won't be in frontmatter - - Agents won't know which --base flag to use - - Tasks won't be committed to target branch - - **IMPORTANT - DO NOT COMMIT AGAIN AFTER THIS COMMAND**: - - finalize-tasks COMMITS the files automatically - - JSON output includes "commit_created": true/false and "commit_hash" - - If commit_created=true, files are ALREADY committed - do not run git commit again - - Other dirty files shown by 'git status' (templates, config) are UNRELATED - - Verify using the commit_hash from JSON output, not by running git add/commit again - -8. **Report**: Provide a concise outcome summary: - - Path to `tasks.md` - - Work package count and per-package subtask tallies - - **Average prompt size** (estimate lines per WP) - - **Validation**: Flag if any WP has >10 subtasks or >700 estimated lines - - Parallelization highlights - - MVP scope recommendation (usually Work Package 1) - - Prompt generation stats (files written, directory structure, any skipped items with rationale) - - Finalization status (dependencies parsed, X WP files updated, committed to target branch) - - Next suggested command (e.g., `/spec-kitty.analyze` or `/spec-kitty.implement`) - -Context for work-package planning: $ARGUMENTS - -The combination of `tasks.md` and the bundled prompt files must enable a new engineer to pick up any work package and deliver it end-to-end without further specification spelunking. 
- -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. - -## Work Package Sizing Guidelines (CRITICAL) - -### Ideal WP Size - -**Target: 3-7 subtasks per WP** -- Results in 200-500 line prompt files -- Agent can hold entire context in working memory -- Clear scope - easy to review -- Parallelizable - multiple agents can work simultaneously - -**Examples of well-sized WPs**: -- WP01: Foundation Setup (5 subtasks, ~300 lines) - - T001: Create database schema - - T002: Set up migration system - - T003: Create base models - - T004: Add validation layer - - T005: Write foundation tests - -- WP02: User Authentication (6 subtasks, ~400 lines) - - T006: Implement login endpoint - - T007: Implement logout endpoint - - T008: Add session management - - T009: Add password reset flow - - T010: Write auth tests - - T011: Add rate limiting - -### Maximum WP Size - -**Hard limit: 10 subtasks, ~700 lines** -- Beyond this, agents start making mistakes -- Prompts become overwhelming -- Reviews take too long -- Integration risk increases - -**If you need more than 10 subtasks**: SPLIT into multiple WPs. - -### Number of WPs: No Arbitrary Limit - -**DO NOT limit based on WP count. 
Limit based on SIZE.** - -- ✅ **20 WPs of 5 subtasks each** = 100 subtasks, manageable prompts -- ❌ **5 WPs of 20 subtasks each** = 100 subtasks, overwhelming 1400-line prompts - -**Feature complexity scales with subtask count, not WP count**: -- Simple feature: 10-15 subtasks → 2-4 WPs -- Medium feature: 30-50 subtasks → 6-10 WPs -- Complex feature: 80-120 subtasks → 15-20 WPs ← **Totally fine!** -- Very complex: 150+ subtasks → 25-30 WPs ← **Also fine!** - -**The goal is manageable WP size, not minimizing WP count.** - -### When to Split a WP - -**Split if ANY of these are true**: -- More than 10 subtasks -- Prompt would exceed 700 lines -- Multiple independent concerns mixed together -- Different phases or priorities mixed -- Agent would need to switch contexts multiple times - -**How to split**: -- By phase: Foundation WP01, Implementation WP02, Testing WP03 -- By component: Database WP01, API WP02, UI WP03 -- By user story: Story 1 WP01, Story 2 WP02, Story 3 WP03 -- By type of work: Code WP01, Tests WP02, Migration WP03, Docs WP04 - -### When to Merge WPs - -**Merge if ALL of these are true**: -- Each WP has <3 subtasks -- Combined would be <7 subtasks -- Both address the same concern/component -- No natural parallelization opportunity -- Implementation is highly coupled - -**Don't merge just to hit a WP count target!** - -## Task Generation Rules - -**Tests remain optional**. Only include testing tasks/steps if the feature spec or user explicitly demands them. - -1. **Subtask derivation**: - - Assign IDs `Txxx` sequentially in execution order. - - Use `[P]` for parallel-safe items (different files/components). - - Include migrations, data seeding, observability, and operational chores. - - **Ideal subtask granularity**: One clear action (e.g., "Create user model", "Add login endpoint") - - **Too granular**: "Add import statement", "Fix typo" (bundle these) - - **Too coarse**: "Build entire API" (split into endpoints) - -2. 
**Work package grouping**: - - **Focus on SIZE first, count second** - - Target 3-7 subtasks per WP (200-500 line prompts) - - Maximum 10 subtasks per WP (700 line prompts) - - Keep each work package laser-focused on a single goal - - Avoid mixing unrelated concerns - - **Let complexity dictate WP count**: 20+ WPs is fine for complex features - -3. **Prioritisation & dependencies**: - - Sequence work packages: setup → foundational → story phases (priority order) → polish. - - Call out inter-package dependencies explicitly in both `tasks.md` and the prompts. - - Front-load infrastructure/foundation WPs (enable parallelization) - -4. **Prompt composition**: - - Mirror subtask order inside the prompt. - - Provide actionable implementation and test guidance per subtask—short for trivial work, exhaustive for complex flows. - - **Aim for 30-70 lines per subtask** in the prompt (includes purpose, steps, files, validation) - - Surface risks, integration points, and acceptance gates clearly so reviewers know what to verify. - - Include examples where helpful (API request/response shapes, config file structures, test cases) - -5. **Quality checkpoints**: - - After drafting WPs, review each prompt size estimate - - If any WP >700 lines: **STOP and split it** - - If most WPs <200 lines: Consider merging related ones - - Aim for consistency: Most WPs should be similar size (within 200-line range) - - **Think like an implementer**: Can I complete this WP in one focused session? If not, it's too big. - -6. **Think like a reviewer**: Any vague requirement should be tightened until a reviewer can objectively mark it done or not done. - -## Step-by-Step Process - -### Step 1: Setup - -Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` and capture `FEATURE_DIR`. 
- -### Step 2: Load Design Documents - -Read from `FEATURE_DIR`: -- spec.md (required) -- plan.md (required) -- data-model.md (optional) -- research.md (optional) -- contracts/ (optional) - -### Step 3: Derive ALL Subtasks - -Create complete list of subtasks with IDs T001, T002, etc. - -**Don't worry about count yet - capture EVERYTHING needed.** - -### Step 4: Group into Work Packages - -**SIZING ALGORITHM**: - -``` -For each cohesive unit of work: - 1. List related subtasks - 2. Count subtasks - 3. Estimate prompt lines (subtasks × 50 lines avg) - - If subtasks <= 7 AND estimated lines <= 500: - ✓ Good WP size - create it - - Else if subtasks > 10 OR estimated lines > 700: - ✗ Too large - split into 2+ WPs - - Else if subtasks < 3 AND can merge with related WP: - → Consider merging (but don't force it) -``` - -**Examples**: - -**Good sizing**: -- WP01: Database Foundation (5 subtasks, ~300 lines) ✓ -- WP02: User Authentication (7 subtasks, ~450 lines) ✓ -- WP03: Admin Dashboard (6 subtasks, ~400 lines) ✓ - -**Too large - MUST SPLIT**: -- ❌ WP01: Entire Backend (25 subtasks, ~1500 lines) - - ✓ Split into: DB Layer (5), Business Logic (6), API Layer (7), Auth (7) - -**Too small - CONSIDER MERGING**: -- WP01: Add config file (2 subtasks, ~100 lines) -- WP02: Add logging (2 subtasks, ~120 lines) - - ✓ Merge into: WP01: Infrastructure Setup (4 subtasks, ~220 lines) - -### Step 5: Write tasks.md - -Create work package sections with: -- Summary (goal, priority, test criteria) -- Included subtasks (checkbox list) -- Implementation notes -- Parallel opportunities -- Dependencies -- **Estimated prompt size** (e.g., "~400 lines") - -### Step 6: Generate WP Prompt Files - -For each WP, generate `FEATURE_DIR/tasks/WPxx-slug.md` using the template. - -**CRITICAL VALIDATION**: After generating each prompt: -1. Count lines in the prompt -2. If >700 lines: GO BACK and split the WP -3. 
If >1000 lines: **STOP - this will fail** - you MUST split it - -**Self-check**: -- Subtask count: 3-7? ✓ | 8-10? ⚠️ | 11+? ❌ SPLIT -- Estimated lines: 200-500? ✓ | 500-700? ⚠️ | 700+? ❌ SPLIT -- Can implement in one session? ✓ | Multiple sessions needed? ❌ SPLIT - -### Step 7: Finalize Tasks - -Run `spec-kitty agent feature finalize-tasks --json` to: -- Parse dependencies -- Update frontmatter -- Validate (cycles, invalid refs) -- Commit to target branch - -**DO NOT run git commit after this** - finalize-tasks commits automatically. -Check JSON output for "commit_created": true and "commit_hash" to verify. - -### Step 8: Report - -Provide summary with: -- WP count and subtask tallies -- **Size distribution** (e.g., "6 WPs ranging from 250-480 lines") -- **Size validation** (e.g., "✓ All WPs within ideal range" OR "⚠️ WP05 is 820 lines - consider splitting") -- Parallelization opportunities -- MVP scope -- Next command - -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. 
- -## ⚠️ Common Mistakes to Avoid - -### ❌ MISTAKE 1: Optimizing for WP Count - -**Bad thinking**: "I'll create exactly 5-7 WPs to keep it manageable" -→ Results in: 20 subtasks per WP, 1200-line prompts, overwhelmed agents - -**Good thinking**: "Each WP should be 3-7 subtasks (200-500 lines). If that means 15 WPs, that's fine." -→ Results in: Focused WPs, successful implementation, happy agents - -### ❌ MISTAKE 2: Token Conservation During Planning - -**Bad thinking**: "I'll save tokens by writing brief prompts with minimal guidance" -→ Results in: Agents confused during implementation, asking clarifying questions, doing work wrong, requiring rework - -**Good thinking**: "I'll invest tokens now to write thorough prompts with examples and edge cases" -→ Results in: Agents implement correctly the first time, no rework needed, net token savings - -### ❌ MISTAKE 3: Mixing Unrelated Concerns - -**Bad example**: WP03: Misc Backend Work (12 subtasks) -- T010: Add user model -- T011: Configure logging -- T012: Set up email service -- T013: Add admin dashboard -- ... (8 more unrelated tasks) - -**Good approach**: Split by concern -- WP03: User Management (T010-T013, 4 subtasks) -- WP04: Infrastructure Services (T014-T017, 4 subtasks) -- WP05: Admin Dashboard (T018-T021, 4 subtasks) - -### ❌ MISTAKE 4: Insufficient Prompt Detail - -**Bad prompt** (~20 lines per subtask): -```markdown -### Subtask T001: Add user authentication - -**Purpose**: Implement login - -**Steps**: -1. Create endpoint -2. Add validation -3. Test it -``` - -**Good prompt** (~60 lines per subtask): -```markdown -### Subtask T001: Implement User Login Endpoint - -**Purpose**: Create POST /api/auth/login endpoint that validates credentials and returns JWT token. - -**Steps**: -1. 
Create endpoint handler in `src/api/auth.py`: - - Route: POST /api/auth/login - - Request body: `{email: string, password: string}` - - Response: `{token: string, user: UserProfile}` on success - - Error codes: 400 (invalid input), 401 (bad credentials), 429 (rate limited) - -2. Implement credential validation: - - Hash password with bcrypt (matches registration hash) - - Compare against stored hash from database - - Use constant-time comparison to prevent timing attacks - -3. Generate JWT token on success: - - Include: user_id, email, issued_at, expires_at (24 hours) - - Sign with SECRET_KEY from environment - - Algorithm: HS256 - -4. Add rate limiting: - - Max 5 attempts per IP per 15 minutes - - Return 429 with Retry-After header - -**Files**: -- `src/api/auth.py` (new file, ~80 lines) -- `tests/api/test_auth.py` (new file, ~120 lines) - -**Validation**: -- [ ] Valid credentials return 200 with token -- [ ] Invalid credentials return 401 -- [ ] Missing fields return 400 -- [ ] Rate limit enforced (test with 6 requests) -- [ ] JWT token is valid and contains correct claims -- [ ] Token expires after 24 hours - -**Edge Cases**: -- Account doesn't exist: Return 401 (same as wrong password - don't leak info) -- Empty password: Return 400 -- SQL injection in email field: Prevented by parameterized queries -- Concurrent login attempts: Handle with database locking -``` - -## Remember - -**This is the most important planning work you'll do.** - -A well-crafted set of work packages with detailed prompts makes implementation smooth and parallelizable. - -A rushed job with vague, oversized WPs causes: -- Agents getting stuck -- Implementation taking 2-3x longer -- Rework and review cycles -- Feature failure - -**Invest the tokens now. Be thorough. 
Future agents will thank you.** diff --git a/.kilocode/workflows/spec-kitty.accept.md b/.kilocode/workflows/spec-kitty.accept.md deleted file mode 100644 index b3b718ccd4..0000000000 --- a/.kilocode/workflows/spec-kitty.accept.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Validate feature readiness and guide final acceptance steps. ---- - - -# /spec-kitty.accept - Validate Feature Readiness - -**Version**: 0.11.0+ -**Purpose**: Validate all work packages are complete and feature is ready to merge. - -## 📍 WORKING DIRECTORY: Run from MAIN repository - -**IMPORTANT**: Accept runs from the main repository root, NOT from a WP worktree. - -```bash -# If you're in a worktree, return to main first: -cd $(git rev-parse --show-toplevel) - -# Then run accept: -spec-kitty accept -``` - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery (mandatory) - -Before running the acceptance workflow, gather the following: - -1. **Feature slug** (e.g., `005-awesome-thing`). If omitted, detect automatically. -2. **Acceptance mode**: - - `pr` when the feature will merge via hosted pull request. - - `local` when the feature will merge locally without a PR. - - `checklist` to run the readiness checklist without committing or producing merge instructions. -3. **Validation commands executed** (tests/builds). Collect each command verbatim; omit if none. -4. **Acceptance actor** (optional, defaults to the current agent name). - -Ask one focused question per item and confirm the summary before continuing. End the discovery turn with `WAITING_FOR_ACCEPTANCE_INPUT` until all answers are provided. - -## Execution Plan - -1. Compile the acceptance options into an argument list: - - Always include `--actor "kilocode"`. - - Append `--feature ""` when the user supplied a slug. - - Append `--mode ` (`pr`, `local`, or `checklist`). - - Append `--test ""` for each validation command provided. -2. 
Run `(Missing script command for sh)` (the CLI wrapper) with the assembled arguments **and** `--json`. -3. Parse the JSON response. It contains: - - `summary.ok` (boolean) and other readiness details. - - `summary.outstanding` categories when issues remain. - - `instructions` (merge steps) and `cleanup_instructions`. - - `notes` (e.g., acceptance commit hash). -4. Present the outcome: - - If `summary.ok` is `false`, list each outstanding category with bullet points and advise the user to resolve them before retrying acceptance. - - If `summary.ok` is `true`, display: - - Acceptance timestamp, actor, and (if present) acceptance commit hash. - - Merge instructions and cleanup instructions as ordered steps. - - Validation commands executed (if any). -5. When the mode is `checklist`, make it clear no commits or merge instructions were produced. - -## Output Requirements - -- Summaries must be in plain text (no tables). Use short bullet lists for instructions. -- Surface outstanding issues before any congratulations or success messages. -- If the JSON payload includes warnings, surface them under an explicit **Warnings** section. -- Never fabricate results; only report what the JSON contains. - -## Error Handling - -- If the command fails or returns invalid JSON, report the failure and request user guidance (do not retry automatically). -- When outstanding issues exist, do **not** attempt to force acceptance—return the checklist and prompt the user to fix the blockers. diff --git a/.kilocode/workflows/spec-kitty.analyze.md b/.kilocode/workflows/spec-kitty.analyze.md deleted file mode 100644 index e2cd797d48..0000000000 --- a/.kilocode/workflows/spec-kitty.analyze.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation. 
---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Goal - -Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`. - -## Operating Constraints - -**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually). - -**Constitution Authority**: The project constitution (`/.kittify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`. - -## Execution Steps - -### 1. Initialize Analysis Context - -Run `(Missing script command for sh)` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths: - -- SPEC = FEATURE_DIR/spec.md -- PLAN = FEATURE_DIR/plan.md -- TASKS = FEATURE_DIR/tasks.md - -Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command). - -### 2. 
Load Artifacts (Progressive Disclosure) - -Load only the minimal necessary context from each artifact: - -**From spec.md:** - -- Overview/Context -- Functional Requirements -- Non-Functional Requirements -- User Stories -- Edge Cases (if present) - -**From plan.md:** - -- Architecture/stack choices -- Data Model references -- Phases -- Technical constraints - -**From tasks.md:** - -- Task IDs -- Descriptions -- Phase grouping -- Parallel markers [P] -- Referenced file paths - -**From constitution:** - -- Load `/.kittify/memory/constitution.md` for principle validation - -### 3. Build Semantic Models - -Create internal representations (do not include raw artifacts in output): - -- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`) -- **User story/action inventory**: Discrete user actions with acceptance criteria -- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases) -- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements - -### 4. Detection Passes (Token-Efficient Analysis) - -Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary. - -#### A. Duplication Detection - -- Identify near-duplicate requirements -- Mark lower-quality phrasing for consolidation - -#### B. Ambiguity Detection - -- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria -- Flag unresolved placeholders (TODO, TKTK, ???, ``, etc.) - -#### C. Underspecification - -- Requirements with verbs but missing object or measurable outcome -- User stories missing acceptance criteria alignment -- Tasks referencing files or components not defined in spec/plan - -#### D. 
Constitution Alignment - -- Any requirement or plan element conflicting with a MUST principle -- Missing mandated sections or quality gates from constitution - -#### E. Coverage Gaps - -- Requirements with zero associated tasks -- Tasks with no mapped requirement/story -- Non-functional requirements not reflected in tasks (e.g., performance, security) - -#### F. Inconsistency - -- Terminology drift (same concept named differently across files) -- Data entities referenced in plan but absent in spec (or vice versa) -- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note) -- Conflicting requirements (e.g., one requires Next.js while other specifies Vue) - -### 5. Severity Assignment - -Use this heuristic to prioritize findings: - -- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality -- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion -- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case -- **LOW**: Style/wording improvements, minor redundancy not affecting execution order - -### 6. Produce Compact Analysis Report - -Output a Markdown report (no file writes) with the following structure: - -## Specification Analysis Report - -| ID | Category | Severity | Location(s) | Summary | Recommendation | -|----|----------|----------|-------------|---------|----------------| -| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version | - -(Add one row per finding; generate stable IDs prefixed by category initial.) - -**Coverage Summary Table:** - -| Requirement Key | Has Task? 
| Task IDs | Notes | -|-----------------|-----------|----------|-------| - -**Constitution Alignment Issues:** (if any) - -**Unmapped Tasks:** (if any) - -**Metrics:** - -- Total Requirements -- Total Tasks -- Coverage % (requirements with >=1 task) -- Ambiguity Count -- Duplication Count -- Critical Issues Count - -### 7. Provide Next Actions - -At end of report, output a concise Next Actions block: - -- If CRITICAL issues exist: Recommend resolving before `/implement` -- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions -- Provide explicit command suggestions: e.g., "Run /spec-kitty.specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'" - -### 8. Offer Remediation - -Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.) - -## Operating Principles - -### Context Efficiency - -- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation -- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis -- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow -- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts - -### Analysis Guidelines - -- **NEVER modify files** (this is read-only analysis) -- **NEVER hallucinate missing sections** (if absent, report them accurately) -- **Prioritize constitution violations** (these are always CRITICAL) -- **Use examples over exhaustive rules** (cite specific instances, not generic patterns) -- **Report zero issues gracefully** (emit success report with coverage statistics) - -## Context - -$ARGUMENTS diff --git a/.kilocode/workflows/spec-kitty.checklist.md b/.kilocode/workflows/spec-kitty.checklist.md deleted file mode 100644 index 97228e12f3..0000000000 --- a/.kilocode/workflows/spec-kitty.checklist.md +++ /dev/null @@ -1,287 
+0,0 @@ ---- -description: Generate a custom checklist for the current feature based on user requirements. ---- - - -## Checklist Purpose: "Unit Tests for English" - -**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain. - -**NOT for verification/testing**: -- ❌ NOT "Verify the button clicks correctly" -- ❌ NOT "Test error handling works" -- ❌ NOT "Confirm the API returns 200" -- ❌ NOT checking if code/implementation matches the spec - -**FOR requirements quality validation**: -- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness) -- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity) -- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency) -- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage) -- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases) - -**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Execution Steps - -1. **Setup**: Run `(Missing script command for sh)` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list. - - All file paths must be absolute. - -2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST: - - Be generated from the user's phrasing + extracted signals from spec/plan/tasks - - Only ask about information that materially changes checklist content - - Be skipped individually if already unambiguous in `$ARGUMENTS` - - Prefer precision over breadth - - Generation algorithm: - 1. 
Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts"). - 2. Cluster signals into candidate focus areas (max 4) ranked by relevance. - 3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit. - 4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria. - 5. Formulate questions chosen from these archetypes: - - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?") - - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?") - - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?") - - Audience framing (e.g., "Will this be used by the author only or peers during PR review?") - - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?") - - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?") - - Question formatting rules: - - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters - - Limit to A–E options maximum; omit table if a free-form answer is clearer - - Never ask the user to restate what they already said - - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope." - - Defaults when interaction impossible: - - Depth: Standard - - Audience: Reviewer (PR) if code-related; Author otherwise - - Focus: Top 2 relevance clusters - - Output the questions (label Q1/Q2/Q3). 
After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more. - -3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers: - - Derive checklist theme (e.g., security, review, deploy, ux) - - Consolidate explicit must-have items mentioned by user - - Map focus selections to category scaffolding - - Infer any missing context from spec/plan/tasks (do NOT hallucinate) - -4. **Load feature context**: Read from FEATURE_DIR: - - spec.md: Feature requirements and scope - - plan.md (if exists): Technical details, dependencies - - tasks.md (if exists): Implementation tasks - - **Context Loading Strategy**: - - Load only necessary portions relevant to active focus areas (avoid full-file dumping) - - Prefer summarizing long sections into concise scenario/requirement bullets - - Use progressive disclosure: add follow-on retrieval only if gaps detected - - If source docs are large, generate interim summary items instead of embedding raw text - -5. **Generate checklist** - Create "Unit Tests for Requirements": - - Create `FEATURE_DIR/checklists/` directory if it doesn't exist - - Generate unique checklist filename: - - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`) - - Format: `[domain].md` - - If file exists, append to existing file - - Number items sequentially starting from CHK001 - - Each `/spec-kitty.checklist` run creates a NEW file (never overwrites existing checklists) - - **CORE PRINCIPLE - Test the Requirements, Not the Implementation**: - Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for: - - **Completeness**: Are all necessary requirements present? - - **Clarity**: Are requirements unambiguous and specific? 
- - **Consistency**: Do requirements align with each other? - - **Measurability**: Can requirements be objectively verified? - - **Coverage**: Are all scenarios/edge cases addressed? - - **Category Structure** - Group items by requirement quality dimensions: - - **Requirement Completeness** (Are all necessary requirements documented?) - - **Requirement Clarity** (Are requirements specific and unambiguous?) - - **Requirement Consistency** (Do requirements align without conflicts?) - - **Acceptance Criteria Quality** (Are success criteria measurable?) - - **Scenario Coverage** (Are all flows/cases addressed?) - - **Edge Case Coverage** (Are boundary conditions defined?) - - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?) - - **Dependencies & Assumptions** (Are they documented and validated?) - - **Ambiguities & Conflicts** (What needs clarification?) - - **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**: - - ❌ **WRONG** (Testing implementation): - - "Verify landing page displays 3 episode cards" - - "Test hover states work on desktop" - - "Confirm logo click navigates home" - - ✅ **CORRECT** (Testing requirements quality): - - "Are the exact number and layout of featured episodes specified?" [Completeness] - - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity] - - "Are hover state requirements consistent across all interactive elements?" [Consistency] - - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage] - - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases] - - "Are loading states defined for asynchronous episode data?" [Completeness] - - "Does the spec define visual hierarchy for competing UI elements?" 
[Clarity] - - **ITEM STRUCTURE**: - Each item should follow this pattern: - - Question format asking about requirement quality - - Focus on what's WRITTEN (or not written) in the spec/plan - - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.] - - Reference spec section `[Spec §X.Y]` when checking existing requirements - - Use `[Gap]` marker when checking for missing requirements - - **EXAMPLES BY QUALITY DIMENSION**: - - Completeness: - - "Are error handling requirements defined for all API failure modes? [Gap]" - - "Are accessibility requirements specified for all interactive elements? [Completeness]" - - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]" - - Clarity: - - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]" - - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]" - - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]" - - Consistency: - - "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]" - - "Are card component requirements consistent between landing and detail pages? [Consistency]" - - Coverage: - - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]" - - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]" - - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]" - - Measurability: - - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]" - - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]" - - **Scenario Classification & Coverage** (Requirements Quality Focus): - - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios - - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?" 
- - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]" - - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]" - - **Traceability Requirements**: - - MINIMUM: ≥80% of items MUST include at least one traceability reference - - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]` - - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]" - - **Surface & Resolve Issues** (Requirements Quality Problems): - Ask questions about the requirements themselves: - - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]" - - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]" - - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]" - - Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]" - - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]" - - **Content Consolidation**: - - Soft cap: If raw candidate items > 40, prioritize by risk/impact - - Merge near-duplicates checking the same requirement aspect - - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? 
[Coverage]" - - **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test: - - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior - - ❌ References to code execution, user actions, system behavior - - ❌ "Displays correctly", "works properly", "functions as expected" - - ❌ "Click", "navigate", "render", "load", "execute" - - ❌ Test cases, test plans, QA procedures - - ❌ Implementation details (frameworks, APIs, algorithms) - - **✅ REQUIRED PATTERNS** - These test requirements quality: - - ✅ "Are [requirement type] defined/specified/documented for [scenario]?" - - ✅ "Is [vague term] quantified/clarified with specific criteria?" - - ✅ "Are requirements consistent between [section A] and [section B]?" - - ✅ "Can [requirement] be objectively measured/verified?" - - ✅ "Are [edge cases/scenarios] addressed in requirements?" - - ✅ "Does the spec define [missing aspect]?" - -6. **Structure Reference**: Generate the checklist following the canonical template in `.kittify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### ` lines with globally incrementing IDs starting at CHK001. - -7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize: - - Focus areas selected - - Depth level - - Actor/timing - - Any explicit user-specified must-have items incorporated - -**Important**: Each `/spec-kitty.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. 
This allows: - -- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`) -- Simple, memorable filenames that indicate checklist purpose -- Easy identification and navigation in the `checklists/` folder - -To avoid clutter, use descriptive types and clean up obsolete checklists when done. - -## Example Checklist Types & Sample Items - -**UX Requirements Quality:** `ux.md` - -Sample items (testing the requirements, NOT the implementation): -- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]" -- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]" -- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]" -- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]" -- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]" -- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]" - -**API Requirements Quality:** `api.md` - -Sample items: -- "Are error response formats specified for all failure scenarios? [Completeness]" -- "Are rate limiting requirements quantified with specific thresholds? [Clarity]" -- "Are authentication requirements consistent across all endpoints? [Consistency]" -- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]" -- "Is versioning strategy documented in requirements? [Gap]" - -**Performance Requirements Quality:** `performance.md` - -Sample items: -- "Are performance requirements quantified with specific metrics? [Clarity]" -- "Are performance targets defined for all critical user journeys? [Coverage]" -- "Are performance requirements under different load conditions specified? [Completeness]" -- "Can performance requirements be objectively measured? [Measurability]" -- "Are degradation requirements defined for high-load scenarios? 
[Edge Case, Gap]" - -**Security Requirements Quality:** `security.md` - -Sample items: -- "Are authentication requirements specified for all protected resources? [Coverage]" -- "Are data protection requirements defined for sensitive information? [Completeness]" -- "Is the threat model documented and requirements aligned to it? [Traceability]" -- "Are security requirements consistent with compliance obligations? [Consistency]" -- "Are security failure/breach response requirements defined? [Gap, Exception Flow]" - -## Anti-Examples: What NOT To Do - -**❌ WRONG - These test implementation, not requirements:** - -```markdown -- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001] -- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003] -- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010] -- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005] -``` - -**✅ CORRECT - These test requirements quality:** - -```markdown -- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001] -- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003] -- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010] -- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005] -- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap] -- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001] -``` - -**Key Differences:** -- Wrong: Tests if the system works correctly -- Correct: Tests if the requirements are written correctly -- Wrong: Verification of behavior -- Correct: Validation of requirement quality -- Wrong: "Does it do X?" -- Correct: "Is X clearly specified?" 
diff --git a/.kilocode/workflows/spec-kitty.clarify.md b/.kilocode/workflows/spec-kitty.clarify.md deleted file mode 100644 index 6cc7b09ae5..0000000000 --- a/.kilocode/workflows/spec-kitty.clarify.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec. ---- - - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Outline - -Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file. - -Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/spec-kitty.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases. - -Execution steps: - -1. Run `spec-kitty agent feature check-prerequisites --json --paths-only` from the repository root and parse JSON for: - - `FEATURE_DIR` - Absolute path to feature directory (e.g., `/path/to/kitty-specs/017-my-feature/`) - - `FEATURE_SPEC` - Absolute path to spec.md file - - If command fails or JSON parsing fails, abort and instruct user to run `/spec-kitty.specify` first or verify they are in a spec-kitty-initialized repository. - -2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked). 
- - Functional Scope & Behavior: - - Core user goals & success criteria - - Explicit out-of-scope declarations - - User roles / personas differentiation - - Domain & Data Model: - - Entities, attributes, relationships - - Identity & uniqueness rules - - Lifecycle/state transitions - - Data volume / scale assumptions - - Interaction & UX Flow: - - Critical user journeys / sequences - - Error/empty/loading states - - Accessibility or localization notes - - Non-Functional Quality Attributes: - - Performance (latency, throughput targets) - - Scalability (horizontal/vertical, limits) - - Reliability & availability (uptime, recovery expectations) - - Observability (logging, metrics, tracing signals) - - Security & privacy (authN/Z, data protection, threat assumptions) - - Compliance / regulatory constraints (if any) - - Integration & External Dependencies: - - External services/APIs and failure modes - - Data import/export formats - - Protocol/versioning assumptions - - Edge Cases & Failure Handling: - - Negative scenarios - - Rate limiting / throttling - - Conflict resolution (e.g., concurrent edits) - - Constraints & Tradeoffs: - - Technical constraints (language, storage, hosting) - - Explicit tradeoffs or rejected alternatives - - Terminology & Consistency: - - Canonical glossary terms - - Avoided synonyms / deprecated terms - - Completion Signals: - - Acceptance criteria testability - - Measurable Definition of Done style indicators - - Misc / Placeholders: - - TODO markers / unresolved decisions - - Ambiguous adjectives ("robust", "intuitive") lacking quantification - - For each category with Partial or Missing status, add a candidate question opportunity unless: - - Clarification would not materially change implementation or validation strategy - - Information is better deferred to planning phase (note internally) - -3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. 
Apply these constraints: - - Maximum of 10 total questions across the whole session. - - Each question must be answerable with EITHER: - * A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR - * A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words"). - - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation. - - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved. - - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness). - - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests. - - Scale thoroughness to the feature’s complexity: a lightweight enhancement may only need one or two confirmations, while multi-system efforts warrant the full question budget if gaps remain critical. - - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic. - -4. Sequential questioning loop (interactive): - - Present EXACTLY ONE question at a time. - - For multiple-choice questions, list options inline using letter prefixes rather than tables, e.g. - `Options: (A) describe option A · (B) describe option B · (C) describe option C · (D) short custom answer (<=5 words)` - Ask the user to reply with the letter (or short custom text when offered). - - For short-answer style (no meaningful discrete options), output a single line after the question: `Format: Short answer (<=5 words)`. - - After the user answers: - * Validate the answer maps to one option or fits the <=5 word constraint. - * If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance). 
- * Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question. - - Stop asking further questions when: - * All critical ambiguities resolved early (remaining queued items become unnecessary), OR - * User signals completion ("done", "good", "no more"), OR - * You reach 5 asked questions. - - Never reveal future queued questions in advance. - - If no valid questions exist at start, immediately report no critical ambiguities. - -5. Integration after EACH accepted answer (incremental update approach): - - Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents. - - For the first integrated answer in this session: - * Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing). - * Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today. - - Append a bullet line immediately after acceptance: `- Q: → A: `. - - Then immediately apply the clarification to the most appropriate section(s): - * Functional ambiguity → Update or add a bullet in Functional Requirements. - * User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario. - * Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly. - * Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target). - * Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it). - * Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once. 
- - If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text. - - Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite). - - Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact. - - Keep each inserted clarification minimal and testable (avoid narrative drift). - -6. Validation (performed after EACH write plus final pass): - - Clarifications session contains exactly one bullet per accepted answer (no duplicates). - - Total asked (accepted) questions ≤ 5. - - Updated sections contain no lingering vague placeholders the new answer was meant to resolve. - - No contradictory earlier statement remains (scan for now-invalid alternative choices removed). - - Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`. - - Terminology consistency: same canonical term used across all updated sections. - -7. Write the updated spec back to `FEATURE_SPEC`. - -8. Report completion (after questioning loop ends or early termination): - - Number of questions asked & answered. - - Path to updated spec. - - Sections touched (list names). - - Coverage summary listing each taxonomy category with a status label (Resolved / Deferred / Clear / Outstanding). Present as plain text or bullet list, not a table. - - If any Outstanding or Deferred remain, recommend whether to proceed to `/spec-kitty.plan` or run `/spec-kitty.clarify` again later post-plan. - - Suggested next command. - -Behavior rules: -- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding. -- If spec file missing, instruct user to run `/spec-kitty.specify` first (do not create a new spec here). 
-- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions). -- Avoid speculative tech stack questions unless the absence blocks functional clarity. -- Respect user early termination signals ("stop", "done", "proceed"). - - If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing. - - If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale. - -Context for prioritization: User arguments from $ARGUMENTS section above (if provided). Use these to focus clarification on specific areas of concern mentioned by the user. diff --git a/.kilocode/workflows/spec-kitty.constitution.md b/.kilocode/workflows/spec-kitty.constitution.md deleted file mode 100644 index 6c79509b73..0000000000 --- a/.kilocode/workflows/spec-kitty.constitution.md +++ /dev/null @@ -1,433 +0,0 @@ ---- -description: Create or update the project constitution through interactive phase-based discovery. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -*Path: [.kittify/templates/commands/constitution.md](.kittify/templates/commands/constitution.md)* - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - ---- - -## What This Command Does - -This command creates or updates the **project constitution** through an interactive, phase-based discovery workflow. - -**Location**: `.kittify/memory/constitution.md` (project root, not worktrees) -**Scope**: Project-wide principles that apply to ALL features - -**Important**: The constitution is OPTIONAL. All spec-kitty commands work without it. 
- -**Constitution Purpose**: -- Capture technical standards (languages, testing, deployment) -- Document code quality expectations (review process, quality gates) -- Record tribal knowledge (team conventions, lessons learned) -- Define governance (how the constitution changes, who enforces it) - ---- - -## Discovery Workflow - -This command uses a **4-phase discovery process**: - -1. **Phase 1: Technical Standards** (Recommended) - - Languages, frameworks, testing requirements - - Performance targets, deployment constraints - - ≈3-4 questions, creates a lean foundation - -2. **Phase 2: Code Quality** (Optional) - - PR requirements, review checklist, quality gates - - Documentation standards - - ≈3-4 questions - -3. **Phase 3: Tribal Knowledge** (Optional) - - Team conventions, lessons learned - - Historical decisions (optional) - - ≈2-4 questions - -4. **Phase 4: Governance** (Optional) - - Amendment process, compliance validation - - Exception handling (optional) - - ≈2-3 questions - -**Paths**: -- **Minimal** (≈1 page): Phase 1 only → ≈3-5 questions -- **Comprehensive** (≈2-3 pages): All phases → ≈8-12 questions - ---- - -## Execution Outline - -### Step 1: Initial Choice - -Ask the user: -``` -Do you want to establish a project constitution? - -A) No, skip it - I don't need a formal constitution -B) Yes, minimal - Core technical standards only (≈1 page, 3-5 questions) -C) Yes, comprehensive - Full governance and tribal knowledge (≈2-3 pages, 8-12 questions) -``` - -Handle responses: -- **A (Skip)**: Create a minimal placeholder at `.kittify/memory/constitution.md`: - - Title + short note: "Constitution skipped - not required for spec-kitty usage. Run /spec-kitty.constitution anytime to create one." - - Exit successfully. -- **B (Minimal)**: Continue with Phase 1 only. -- **C (Comprehensive)**: Continue through all phases, asking whether to skip each optional phase. 
- -### Step 2: Phase 1 - Technical Standards - -Context: -``` -Phase 1: Technical Standards -These are the non-negotiable technical requirements that all features must follow. -This phase is recommended for all projects. -``` - -Ask one question at a time: - -**Q1: Languages and Frameworks** -``` -What languages and frameworks are required for this project? -Examples: -- "Python 3.11+ with FastAPI for backend" -- "TypeScript 4.9+ with React 18 for frontend" -- "Rust 1.70+ with no external dependencies" -``` - -**Q2: Testing Requirements** -``` -What testing framework and coverage requirements? -Examples: -- "pytest with 80% line coverage, 100% for critical paths" -- "Jest with 90% coverage, unit + integration tests required" -- "cargo test, no specific coverage target but all features must have tests" -``` - -**Q3: Performance and Scale Targets** -``` -What are the performance and scale expectations? -Examples: -- "Handle 1000 requests/second at p95 < 200ms" -- "Support 10k concurrent users, 1M daily active users" -- "CLI operations complete in < 2 seconds" -- "N/A - performance not a primary concern" -``` - -**Q4: Deployment and Constraints** -``` -What are the deployment constraints or platform requirements? -Examples: -- "Docker-only, deployed to Kubernetes" -- "Must run on Ubuntu 20.04 LTS without external dependencies" -- "Cross-platform: Linux, macOS, Windows 10+" -- "N/A - no specific deployment constraints" -``` - -### Step 3: Phase 2 - Code Quality (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 2: Code Quality -Skip this if your team uses standard practices without special requirements. - -Do you want to define code quality standards? -A) Yes, ask questions -B) No, skip this phase (use standard practices) -``` - -If yes, ask one at a time: - -**Q5: PR Requirements** -``` -What are the requirements for pull requests? 
-Examples: -- "2 approvals required, 1 must be from core team" -- "1 approval required, PR must pass CI checks" -- "Self-merge allowed after CI passes for maintainers" -``` - -**Q6: Code Review Checklist** -``` -What should reviewers check during code review? -Examples: -- "Tests added, docstrings updated, follows PEP 8, no security issues" -- "Type annotations present, error handling robust, performance considered" -- "Standard review - correctness, clarity, maintainability" -``` - -**Q7: Quality Gates** -``` -What quality gates must pass before merging? -Examples: -- "All tests pass, coverage ≥80%, linter clean, security scan clean" -- "Tests pass, type checking passes, manual QA approved" -- "CI green, no merge conflicts, PR approved" -``` - -**Q8: Documentation Standards** -``` -What documentation is required? -Examples: -- "All public APIs must have docstrings + examples" -- "README updated for new features, ADRs for architectural decisions" -- "Inline comments for complex logic, keep docs up to date" -- "Minimal - code should be self-documenting" -``` - -### Step 4: Phase 3 - Tribal Knowledge (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 3: Tribal Knowledge -Skip this for new projects or if team conventions are minimal. - -Do you want to capture tribal knowledge? -A) Yes, ask questions -B) No, skip this phase -``` - -If yes, ask: - -**Q9: Team Conventions** -``` -What team conventions or coding styles should everyone follow? -Examples: -- "Use Result for fallible operations, never unwrap() in prod" -- "Prefer composition over inheritance, keep classes small (<200 lines)" -- "Use feature flags for gradual rollouts, never merge half-finished features" -``` - -**Q10: Lessons Learned** -``` -What past mistakes or lessons learned should guide future work? 
-Examples: -- "Always version APIs from day 1" -- "Write integration tests first" -- "Keep dependencies minimal - every dependency is a liability" -- "N/A - no major lessons yet" -``` - -Optional follow-up: -``` -Do you want to document historical architectural decisions? -A) Yes -B) No -``` - -**Q11: Historical Decisions** (only if yes) -``` -Any historical architectural decisions that should guide future work? -Examples: -- "Chose microservices for independent scaling" -- "Chose monorepo for atomic changes across services" -- "Chose SQLite for simplicity over PostgreSQL" -``` - -### Step 5: Phase 4 - Governance (Optional) - -Ask only if comprehensive path is selected: -``` -Phase 4: Governance -Skip this to use simple defaults. - -Do you want to define governance process? -A) Yes, ask questions -B) No, skip this phase (use simple defaults) -``` - -If skipped, use defaults: -- Amendment: Any team member can propose changes via PR -- Compliance: Team validates during code review -- Exceptions: Discuss with team, document in PR - -If yes, ask: - -**Q12: Amendment Process** -``` -How should the constitution be amended? -Examples: -- "PR with 2 approvals, announce in team chat, 1 week discussion" -- "Any maintainer can update via PR" -- "Quarterly review, team votes on changes" -``` - -**Q13: Compliance Validation** -``` -Who validates that features comply with the constitution? -Examples: -- "Code reviewers check compliance, block merge if violated" -- "Team lead reviews architecture" -- "Self-managed - developers responsible" -``` - -Optional follow-up: -``` -Do you want to define exception handling? -A) Yes -B) No -``` - -**Q14: Exception Handling** (only if yes) -``` -How should exceptions to the constitution be handled? 
-Examples: -- "Document in ADR, require 3 approvals, set sunset date" -- "Case-by-case discussion, strong justification required" -- "Exceptions discouraged - update constitution instead" -``` - -### Step 6: Summary and Confirmation - -Present a summary and ask for confirmation: -``` -Constitution Summary -==================== - -You've completed [X] phases and answered [Y] questions. -Here's what will be written to .kittify/memory/constitution.md: - -Technical Standards: -- Languages: [Q1] -- Testing: [Q2] -- Performance: [Q3] -- Deployment: [Q4] - -[If Phase 2 completed] -Code Quality: -- PR Requirements: [Q5] -- Review Checklist: [Q6] -- Quality Gates: [Q7] -- Documentation: [Q8] - -[If Phase 3 completed] -Tribal Knowledge: -- Conventions: [Q9] -- Lessons Learned: [Q10] -- Historical Decisions: [Q11 if present] - -Governance: [Custom if Phase 4 completed, otherwise defaults] - -Estimated length: ≈[50-80 lines minimal] or ≈[150-200 lines comprehensive] - -Proceed with writing constitution? -A) Yes, write it -B) No, let me start over -C) Cancel, don't create constitution -``` - -Handle responses: -- **A**: Write the constitution file. -- **B**: Restart from Step 1. -- **C**: Exit without writing. - -### Step 7: Write Constitution File - -Generate the constitution as Markdown: - -```markdown -# [PROJECT_NAME] Constitution - -> Auto-generated by spec-kitty constitution command -> Created: [YYYY-MM-DD] -> Version: 1.0.0 - -## Purpose - -This constitution captures the technical standards, code quality expectations, -tribal knowledge, and governance rules for [PROJECT_NAME]. All features and -pull requests should align with these principles. 
- -## Technical Standards - -### Languages and Frameworks -[Q1] - -### Testing Requirements -[Q2] - -### Performance and Scale -[Q3] - -### Deployment and Constraints -[Q4] - -[If Phase 2 completed] -## Code Quality - -### Pull Request Requirements -[Q5] - -### Code Review Checklist -[Q6] - -### Quality Gates -[Q7] - -### Documentation Standards -[Q8] - -[If Phase 3 completed] -## Tribal Knowledge - -### Team Conventions -[Q9] - -### Lessons Learned -[Q10] - -[If Q11 present] -### Historical Decisions -[Q11] - -## Governance - -[If Phase 4 completed] -### Amendment Process -[Q12] - -### Compliance Validation -[Q13] - -[If Q14 present] -### Exception Handling -[Q14] - -[If Phase 4 skipped, use defaults] -### Amendment Process -Any team member can propose amendments via pull request. Changes are discussed -and merged following standard PR review process. - -### Compliance Validation -Code reviewers validate compliance during PR review. Constitution violations -should be flagged and addressed before merge. - -### Exception Handling -Exceptions discussed case-by-case with team. Strong justification required. -Consider updating constitution if exceptions become common. -``` - -### Step 8: Success Message - -After writing, provide: -- Location of the file -- Phases completed and questions answered -- Next steps (review, share with team, run /spec-kitty.specify) - ---- - -## Required Behaviors - -- Ask one question at a time. -- Offer skip options and explain when to skip. -- Keep responses concise and user-focused. -- Ensure the constitution stays lean (1-3 pages, not 10 pages). -- If user chooses to skip entirely, still create the minimal placeholder file and exit successfully. 
diff --git a/.kilocode/workflows/spec-kitty.dashboard.md b/.kilocode/workflows/spec-kitty.dashboard.md deleted file mode 100644 index af4eff346a..0000000000 --- a/.kilocode/workflows/spec-kitty.dashboard.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -description: Open the Spec Kitty dashboard in your browser. ---- - - -## Dashboard Access - -This command launches the Spec Kitty dashboard in your browser using the spec-kitty CLI. - -## What to do - -Simply run the `spec-kitty dashboard` command to: -- Start the dashboard if it's not already running -- Open it in your default web browser -- Display the dashboard URL - -If you need to stop the dashboard, you can use `spec-kitty dashboard --kill`. - -## Implementation - -Execute the following terminal command: - -```bash -spec-kitty dashboard -``` - -## Additional Options - -- To specify a preferred port: `spec-kitty dashboard --port 8080` -- To stop the dashboard: `spec-kitty dashboard --kill` - -## Success Criteria - -- User sees the dashboard URL clearly displayed -- Browser opens automatically to the dashboard -- If browser doesn't open, user gets clear instructions -- Error messages are helpful and actionable diff --git a/.kilocode/workflows/spec-kitty.implement.md b/.kilocode/workflows/spec-kitty.implement.md deleted file mode 100644 index cf59f9e163..0000000000 --- a/.kilocode/workflows/spec-kitty.implement.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -description: Create an isolated workspace (worktree) for implementing a specific work package. ---- - - -## ⚠️ CRITICAL: Working Directory Requirement - -**After running `spec-kitty implement WP##`, you MUST:** - -1. **Run the cd command shown in the output** - e.g., `cd .worktrees/###-feature-WP##/` -2. **ALL file operations happen in this directory** - Read, Write, Edit tools must target files in the workspace -3. 
**NEVER write deliverable files to the main repository** - This is a critical workflow error - -**Why this matters:** -- Each WP has an isolated worktree with its own branch -- Changes in main repository will NOT be seen by reviewers looking at the WP worktree -- Writing to main instead of the workspace causes review failures and merge conflicts - ---- - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). - -**You MUST scroll to the BOTTOM** to see the completion command! - -Run this command to get the work package prompt and implementation instructions: - -```bash -spec-kitty agent workflow implement $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is implementing! - -If no WP ID is provided, it will automatically find the first work package with `lane: "planned"` and move it to "doing" for you. - ---- - -## Commit Workflow - -**BEFORE moving to for_review**, you MUST commit your implementation: - -```bash -cd .worktrees/###-feature-WP##/ -git add -A -git commit -m "feat(WP##): " -``` - -**Then move to review:** -```bash -spec-kitty agent tasks move-task WP## --to for_review --note "Ready for review: " -``` - -**Why this matters:** -- `move-task` validates that your worktree has commits beyond main -- Uncommitted changes will block the move to for_review -- This prevents lost work and ensures reviewers see complete implementations - ---- - -**The Python script handles all file updates automatically - no manual editing required!** - -**NOTE**: If `/spec-kitty.status` shows your WP in "doing" after you moved it to "for_review", don't panic - a reviewer may have moved it back (changes requested), or there's a sync delay. Focus on your WP. 
diff --git a/.kilocode/workflows/spec-kitty.merge.md b/.kilocode/workflows/spec-kitty.merge.md deleted file mode 100644 index 9f739a89b4..0000000000 --- a/.kilocode/workflows/spec-kitty.merge.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -description: Merge a completed feature into the main branch and clean up worktree ---- - - -# /spec-kitty.merge - Merge Feature to Main - -**Version**: 0.11.0+ -**Purpose**: Merge ALL completed work packages for a feature into main branch. - -## CRITICAL: Workspace-per-WP Model (0.11.0) - -In 0.11.0, each work package has its own worktree: -- `.worktrees/###-feature-WP01/` -- `.worktrees/###-feature-WP02/` -- `.worktrees/###-feature-WP03/` - -**Merge merges ALL WP branches at once** (not incrementally one-by-one). - -## ⛔ Location Pre-flight Check (CRITICAL) - -**BEFORE PROCEEDING:** You MUST be in a feature worktree, NOT the main repository. - -Verify your current location: -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/###-feature-name-WP01` (or similar feature worktree) -- Branch: Should show your feature branch name like `###-feature-name-WP01` (NOT `main` or `release/*`) - -**If you see:** -- Branch showing `main` or `release/` -- OR pwd shows the main repository root - -⛔ **STOP - DANGER! You are in the wrong location!** - -**Correct the issue:** -1. Navigate to ANY worktree for this feature: `cd .worktrees/###-feature-name-WP01` -2. Verify you're on a feature branch: `git branch --show-current` -3. 
Then run this merge command again - -**Exception (main branch):** -If you are on `main` and need to merge a workspace-per-WP feature, run: -```bash -spec-kitty merge --feature -``` - ---- - -## Location Pre-flight Check (CRITICAL for AI Agents) - -Before merging, verify you are in the correct working directory by running this validation: - -```bash -python3 -c " -from specify_cli.guards import validate_worktree_location -result = validate_worktree_location() -if not result.is_valid: - print(result.format_error()) - print('\nThis command MUST run from a feature worktree, not the main repository.') - print('\nFor workspace-per-WP features, run from ANY WP worktree:') - print(' cd /path/to/project/.worktrees/-WP01') - print(' # or any other WP worktree for this feature') - raise SystemExit(1) -else: - print('✓ Location verified:', result.branch_name) -" -``` - -**What this validates**: -- Current branch follows the feature pattern like `001-feature-name` or `001-feature-name-WP01` -- You're not attempting to run from `main` or any release branch -- The validator prints clear navigation instructions if you're outside the feature worktree - -**For workspace-per-WP features (0.11.0+)**: -- Run merge from ANY WP worktree (e.g., `.worktrees/014-feature-WP09/`) -- The merge command automatically detects all WP branches and merges them sequentially -- You do NOT need to run merge from each WP worktree individually - -## Prerequisites - -Before running this command: - -1. ✅ All work packages must be in `done` lane (reviewed and approved) -2. ✅ Feature must pass `/spec-kitty.accept` checks -3. ✅ Working directory must be clean (no uncommitted changes in main) -4. ✅ **You must be in main repository root** (not in a worktree) - -## Command Syntax - -```bash -spec-kitty merge ###-feature-slug [OPTIONS] -``` - -**Example**: -```bash -cd /tmp/spec-kitty-test/test-project # Main repo root -spec-kitty merge 001-cli-hello-world -``` - -## What This Command Does - -1. 
**Detects** your current feature branch and worktree status -2. **Runs** pre-flight validation across all worktrees and the target branch -3. **Determines** merge order based on WP dependencies (workspace-per-WP) -4. **Forecasts** conflicts during `--dry-run` and flags auto-resolvable status files -5. **Verifies** working directory is clean (legacy single-worktree) -6. **Switches** to the target branch (default: `main`) -7. **Updates** the target branch (`git pull --ff-only`) -8. **Merges** the feature using your chosen strategy -9. **Auto-resolves** status file conflicts after each WP merge -10. **Optionally pushes** to origin -11. **Removes** the feature worktree (if in one) -12. **Deletes** the feature branch - -## Usage - -### Basic merge (default: merge commit, cleanup everything) - -```bash -spec-kitty merge -``` - -This will: -- Create a merge commit -- Remove the worktree -- Delete the feature branch -- Keep changes local (no push) - -### Merge with options - -```bash -# Squash all commits into one -spec-kitty merge --strategy squash - -# Push to origin after merging -spec-kitty merge --push - -# Keep the feature branch -spec-kitty merge --keep-branch - -# Keep the worktree -spec-kitty merge --keep-worktree - -# Merge into a different branch -spec-kitty merge --target develop - -# See what would happen without doing it -spec-kitty merge --dry-run - -# Run merge from main for a workspace-per-WP feature -spec-kitty merge --feature 017-feature-slug -``` - -### Common workflows - -```bash -# Feature complete, squash and push -spec-kitty merge --strategy squash --push - -# Keep branch for reference -spec-kitty merge --keep-branch - -# Merge into develop instead of main -spec-kitty merge --target develop --push -``` - -## Merge Strategies - -### `merge` (default) -Creates a merge commit preserving all feature branch commits. 
-```bash -spec-kitty merge --strategy merge -``` -✅ Preserves full commit history -✅ Clear feature boundaries in git log -❌ More commits in main branch - -### `squash` -Squashes all feature commits into a single commit. -```bash -spec-kitty merge --strategy squash -``` -✅ Clean, linear history on main -✅ Single commit per feature -❌ Loses individual commit details - -### `rebase` -Requires manual rebase first (command will guide you). -```bash -spec-kitty merge --strategy rebase -``` -✅ Linear history without merge commits -❌ Requires manual intervention -❌ Rewrites commit history - -## Options - -| Option | Description | Default | -|--------|-------------|---------| -| `--strategy` | Merge strategy: `merge`, `squash`, or `rebase` | `merge` | -| `--delete-branch` / `--keep-branch` | Delete feature branch after merge | delete | -| `--remove-worktree` / `--keep-worktree` | Remove feature worktree after merge | remove | -| `--push` | Push to origin after merge | no push | -| `--target` | Target branch to merge into | `main` | -| `--dry-run` | Show what would be done without executing | off | -| `--feature` | Feature slug when merging from main branch | none | -| `--resume` | Resume an interrupted merge | off | - -## Worktree Strategy - -Spec Kitty uses an **opinionated worktree approach**: - -### Workspace-per-WP Model (0.11.0+) - -In the current model, each work package gets its own worktree: - -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system-WP01/ # WP01 worktree -│ ├── 001-auth-system-WP02/ # WP02 worktree -│ ├── 001-auth-system-WP03/ # WP03 worktree -│ └── 002-dashboard-WP01/ # Different feature -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -**Merge behavior for workspace-per-WP**: -- Run `spec-kitty merge` from **any** WP worktree for the feature -- The command automatically detects all WP branches (WP01, WP02, WP03, etc.) 
-- Merges each WP branch into main in sequence -- Cleans up all WP worktrees and branches - -### Legacy Pattern (0.10.x) -``` -my-project/ # Main repo (main branch) -├── .worktrees/ -│ ├── 001-auth-system/ # Feature 1 worktree (single) -│ ├── 002-dashboard/ # Feature 2 worktree (single) -│ └── 003-notifications/ # Feature 3 worktree (single) -├── .kittify/ -├── kitty-specs/ -└── ... (main branch files) -``` - -### The Rules -1. **Main branch** stays in the primary repo root -2. **Feature branches** live in `.worktrees//` -3. **Work on features** happens in their worktrees (isolation) -4. **Merge from worktrees** using this command -5. **Cleanup is automatic** - worktrees removed after merge - -### Why Worktrees? -- ✅ Work on multiple features simultaneously -- ✅ Each feature has its own sandbox -- ✅ No branch switching in main repo -- ✅ Easy to compare features -- ✅ Clean separation of concerns - -### The Flow -``` -1. /spec-kitty.specify → Creates branch + worktree -2. cd .worktrees// → Enter worktree -3. /spec-kitty.plan → Work in isolation -4. /spec-kitty.tasks -5. /spec-kitty.implement -6. /spec-kitty.review -7. /spec-kitty.accept -8. /spec-kitty.merge → Merge + cleanup worktree -9. Back in main repo! → Ready for next feature -``` - -## Error Handling - -### "Already on main branch" -You're not on a feature branch. Switch to your feature branch first: -```bash -cd .worktrees/ -# or -git checkout -``` - -### "Working directory has uncommitted changes" -Commit or stash your changes: -```bash -git add . -git commit -m "Final changes" -# or -git stash -``` - -### "Could not fast-forward main" -Your main branch is behind origin: -```bash -git checkout main -git pull -git checkout -spec-kitty merge -``` - -### "Merge failed - conflicts" -Resolve conflicts manually: -```bash -# Fix conflicts in files -git add -git commit -# Then complete cleanup manually: -git worktree remove .worktrees/ -git branch -d -``` - -## Safety Features - -1. 
**Clean working directory check** - Won't merge with uncommitted changes -2. **Fast-forward only pull** - Won't proceed if main has diverged -3. **Graceful failure** - If merge fails, you can fix manually -4. **Optional operations** - Push, branch delete, and worktree removal are configurable -5. **Dry run mode** - Preview exactly what will happen - -## Examples - -### Complete feature and push -```bash -cd .worktrees/001-auth-system -/spec-kitty.accept -/spec-kitty.merge --push -``` - -### Squash merge for cleaner history -```bash -spec-kitty merge --strategy squash --push -``` - -### Merge but keep branch for reference -```bash -spec-kitty merge --keep-branch --push -``` - -### Check what will happen first -```bash -spec-kitty merge --dry-run -``` - -## After Merging - -After a successful merge, you're back on the main branch with: -- ✅ Feature code integrated -- ✅ Worktree removed (if it existed) -- ✅ Feature branch deleted (unless `--keep-branch`) -- ✅ Ready to start your next feature! - -## Integration with Accept - -The typical flow is: - -```bash -# 1. Run acceptance checks -/spec-kitty.accept --mode local - -# 2. If checks pass, merge -/spec-kitty.merge --push -``` - -Or combine conceptually: -```bash -# Accept verifies readiness -/spec-kitty.accept --mode local - -# Merge performs integration -/spec-kitty.merge --strategy squash --push -``` - -The `/spec-kitty.accept` command **verifies** your feature is complete. -The `/spec-kitty.merge` command **integrates** your feature into main. - -Together they complete the workflow: -``` -specify → plan → tasks → implement → review → accept → merge ✅ -``` diff --git a/.kilocode/workflows/spec-kitty.plan.md b/.kilocode/workflows/spec-kitty.plan.md deleted file mode 100644 index 36e2de1874..0000000000 --- a/.kilocode/workflows/spec-kitty.plan.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -description: Execute the implementation planning workflow using the plan template to generate design artifacts. 
---- - - -# /spec-kitty.plan - Create Implementation Plan - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Plan works in the planning repository. NO worktrees created. - -```bash -# Run from project root (same directory as /spec-kitty.specify): -# You should already be here if you just ran /spec-kitty.specify - -# Creates: -# - kitty-specs/###-feature/plan.md → In planning repository -# - Commits to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -This command runs in the **planning repository**, not in a worktree. - -- Verify you're on the target branch (meta.json → target_branch) before scaffolding plan.md -- Planning artifacts live in `kitty-specs/###-feature/` -- The plan template is committed to the target branch after generation - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - -## Planning Interrogation (mandatory) - -Before executing any scripts or generating artifacts you must interrogate the specification and stakeholders. 
- -- **Scope proportionality (CRITICAL)**: FIRST, assess the feature's complexity from the spec: - - **Trivial/Test Features** (hello world, simple static pages, basic demos): Ask 1-2 questions maximum about tech stack preference, then proceed with sensible defaults - - **Simple Features** (small components, minor API additions): Ask 2-3 questions about tech choices and constraints - - **Complex Features** (new subsystems, multi-component features): Ask 3-5 questions covering architecture, NFRs, integrations - - **Platform/Critical Features** (core infrastructure, security, payments): Full interrogation with 5+ questions - -- **User signals to reduce questioning**: If the user says "use defaults", "just make it simple", "skip to implementation", "vanilla HTML/CSS/JS" - recognize these as signals to minimize planning questions and use standard approaches. - -- **First response rule**: - - For TRIVIAL features: Ask ONE tech stack question, then if answer is simple (e.g., "vanilla HTML"), proceed directly to plan generation - - For other features: Ask a single architecture question and end with `WAITING_FOR_PLANNING_INPUT` - -- If the user has not provided plan context, keep interrogating with one question at a time. - -- **Conversational cadence**: After each reply, assess if you have SUFFICIENT context for this feature's scope. For trivial features, knowing the basic stack is enough. Only continue if critical unknowns remain. - -Planning requirements (scale to complexity): - -1. Maintain a **Planning Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for platform-level). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, standard practices are acceptable (vanilla HTML, simple file structure, no build tools). Only probe if the user's request suggests otherwise. -3. 
When you have sufficient context for the scope, summarize into an **Engineering Alignment** note and confirm. -4. If user explicitly asks to skip questions or use defaults, acknowledge and proceed with best practices for that feature type. - -## Outline - -1. **Check planning discovery status**: - - If any planning questions remain unanswered or the user has not confirmed the **Engineering Alignment** summary, stay in the one-question cadence, capture the user's response, update your internal table, and end with `WAITING_FOR_PLANNING_INPUT`. Do **not** surface the table. Do **not** run the setup command yet. - - Once every planning question has a concrete answer and the alignment summary is confirmed by the user, continue. - -2. **Detect feature context** (CRITICAL - prevents wrong feature selection): - - Before running any commands, detect which feature you're working on: - - a. **Check git branch name**: - - Run: `git rev-parse --abbrev-ref HEAD` - - If branch matches pattern `###-feature-name` or `###-feature-name-WP##`, extract the feature slug (strip `-WP##` suffix if present) - - Example: Branch `020-my-feature` or `020-my-feature-WP01` → Feature `020-my-feature` - - b. **Check current directory**: - - Look for `###-feature-name` pattern in the current path - - Examples: - - Inside `kitty-specs/020-my-feature/` → Feature `020-my-feature` - - Not in a worktree during planning (worktrees only used during implement): If detection runs from `.worktrees/020-my-feature-WP01/` → Feature `020-my-feature` - - c. **Prioritize features without plan.md** (if multiple exist): - - If multiple features exist and none detected from branch/path, list all features in `kitty-specs/` - - Prefer features that don't have `plan.md` yet (unplanned features) - - If ambiguous, ask the user which feature to plan - - d. 
**Extract feature slug**: - - Feature slug format: `###-feature-name` (e.g., `020-my-feature`) - - You MUST pass this explicitly to the setup-plan command using `--feature` flag - - **DO NOT** rely on auto-detection by the CLI (prevents wrong feature selection) - -3. **Setup**: Run `spec-kitty agent feature setup-plan --feature --json` from the repository root and parse JSON for: - - `result`: "success" or error message - - `plan_file`: Absolute path to the created plan.md - - `feature_dir`: Absolute path to the feature directory - - **Example**: - ```bash - # If detected feature is 020-my-feature: - spec-kitty agent feature setup-plan --feature 020-my-feature --json - ``` - - **Error handling**: If the command fails with "Cannot detect feature" or "Multiple features found", verify your feature detection logic in step 2 and ensure you're passing the correct feature slug. - -4. **Load context**: Read FEATURE_SPEC and `.kittify/memory/constitution.md` if it exists. If the constitution file is missing, skip Constitution Check and note that it is absent. Load IMPL_PLAN template (already copied). - -5. **Execute plan workflow**: Follow the structure in IMPL_PLAN template, using the validated planning answers as ground truth: - - Update Technical Context with explicit statements from the user or discovery research; mark `[NEEDS CLARIFICATION: …]` only when the user deliberately postpones a decision - - If a constitution exists, fill Constitution Check section from it and challenge any conflicts directly with the user. If no constitution exists, mark the section as skipped. 
- - Evaluate gates (ERROR if violations unjustified or questions remain unanswered) - - Phase 0: Generate research.md (commission research to resolve every outstanding clarification) - - Phase 1: Generate data-model.md, contracts/, quickstart.md based on confirmed intent - - Phase 1: Update agent context by running the agent script - - Re-evaluate Constitution Check post-design, asking the user to resolve new gaps before proceeding - -6. **STOP and report**: This command ends after Phase 1 planning. Report branch, IMPL_PLAN path, and generated artifacts. - - **⚠️ CRITICAL: DO NOT proceed to task generation!** The user must explicitly run `/spec-kitty.tasks` to generate work packages. Your job is COMPLETE after reporting the planning artifacts. - -## Phases - -### Phase 0: Outline & Research - -1. **Extract unknowns from Technical Context** above: - - For each NEEDS CLARIFICATION → research task - - For each dependency → best practices task - - For each integration → patterns task - -2. **Generate and dispatch research agents**: - ``` - For each unknown in Technical Context: - Task: "Research {unknown} for {feature context}" - For each technology choice: - Task: "Find best practices for {tech} in {domain}" - ``` - -3. **Consolidate findings** in `research.md` using format: - - Decision: [what was chosen] - - Rationale: [why chosen] - - Alternatives considered: [what else evaluated] - -**Output**: research.md with all NEEDS CLARIFICATION resolved - -### Phase 1: Design & Contracts - -**Prerequisites:** `research.md` complete - -1. **Extract entities from feature spec** → `data-model.md`: - - Entity name, fields, relationships - - Validation rules from requirements - - State transitions if applicable - -2. **Generate API contracts** from functional requirements: - - For each user action → endpoint - - Use standard REST/GraphQL patterns - - Output OpenAPI/GraphQL schema to `/contracts/` - -3. 
**Agent context update**: - - Run `` - - These scripts detect which AI agent is in use - - Update the appropriate agent-specific context file - - Add only new technology from current plan - - Preserve manual additions between markers - -**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file - -## Key rules - -- Use absolute paths -- ERROR on gate failures or unresolved clarifications - ---- - -## ⛔ MANDATORY STOP POINT - -**This command is COMPLETE after generating planning artifacts.** - -After reporting: -- `plan.md` path -- `research.md` path (if generated) -- `data-model.md` path (if generated) -- `contracts/` contents (if generated) -- Agent context file updated - -**YOU MUST STOP HERE.** - -Do NOT: -- ❌ Generate `tasks.md` -- ❌ Create work package (WP) files -- ❌ Create `tasks/` subdirectories -- ❌ Proceed to implementation - -The user will run `/spec-kitty.tasks` when they are ready to generate work packages. - -**Next suggested command**: `/spec-kitty.tasks` (user must invoke this explicitly) diff --git a/.kilocode/workflows/spec-kitty.research.md b/.kilocode/workflows/spec-kitty.research.md deleted file mode 100644 index b6bdff8ea7..0000000000 --- a/.kilocode/workflows/spec-kitty.research.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Run the Phase 0 research workflow to scaffold research artifacts before task planning. ---- - -**Path reference rule:** When you mention directories or files, provide either the absolute path or a path relative to the project root (for example, `kitty-specs//tasks/`). Never refer to a folder by name alone. - - -*Path: [.kittify/templates/commands/research.md](.kittify/templates/commands/research.md)* - - -## Location Pre-flight Check - -**BEFORE PROCEEDING:** Verify you are working in the feature worktree. 
- -```bash -pwd -git branch --show-current -``` - -**Expected output:** -- `pwd`: Should end with `.worktrees/001-feature-name` (or similar feature worktree) -- Branch: Should show your feature branch name like `001-feature-name` (NOT `main`) - -**If you see the main branch or main repository path:** - -⛔ **STOP - You are in the wrong location!** - -This command creates research artifacts in your feature directory. You must be in the feature worktree. - -**Correct the issue:** -1. Navigate to your feature worktree: `cd .worktrees/001-feature-name` -2. Verify you're on the correct feature branch: `git branch --show-current` -3. Then run this research command again - ---- - -## What This Command Creates - -When you run `spec-kitty research`, the following files are generated in your feature directory: - -**Generated files**: -- **research.md** – Decisions, rationale, and supporting evidence -- **data-model.md** – Entities, attributes, and relationships -- **research/evidence-log.csv** – Sources and findings audit trail -- **research/source-register.csv** – Reference tracking for all sources - -**Location**: All files go in `kitty-specs/001-feature-name/` - ---- - -## Workflow Context - -**Before this**: `/spec-kitty.plan` calls this as "Phase 0" research phase - -**This command**: -- Scaffolds research artifacts -- Creates templates for capturing decisions and evidence -- Establishes audit trail for traceability - -**After this**: -- Fill in research.md, data-model.md, and CSV logs with actual findings -- Continue with `/spec-kitty.plan` which uses your research to drive technical design - ---- - -## Goal - -Create `research.md`, `data-model.md`, and supporting CSV stubs based on the active mission so implementation planning can reference concrete decisions and evidence. - -## What to do - -1. You should already be in the correct feature worktree (verified above with pre-flight check). -2. Run `spec-kitty research` to generate the mission-specific research artifacts. 
(Add `--force` only when it is acceptable to overwrite existing drafts.) -3. Open the generated files and fill in the required content: - - `research.md` – capture decisions, rationale, and supporting evidence. - - `data-model.md` – document entities, attributes, and relationships discovered during research. - - `research/evidence-log.csv` & `research/source-register.csv` – log all sources and findings so downstream reviewers can audit the trail. -4. If your research generates additional templates (spreadsheets, notebooks, etc.), store them under `research/` and reference them inside `research.md`. -5. Summarize open questions or risks at the bottom of `research.md`. These should feed directly into `/spec-kitty.tasks` and future implementation prompts. - -## Success Criteria - -- `kitty-specs//research.md` explains every major decision with references to evidence. -- `kitty-specs//data-model.md` lists the entities and relationships needed for implementation. -- CSV logs exist (even if partially filled) so evidence gathering is traceable. -- Outstanding questions from the research phase are tracked and ready for follow-up during planning or execution. diff --git a/.kilocode/workflows/spec-kitty.review.md b/.kilocode/workflows/spec-kitty.review.md deleted file mode 100644 index fde47891fc..0000000000 --- a/.kilocode/workflows/spec-kitty.review.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Perform structured code review and kanban transitions for completed task prompt files ---- - - -**IMPORTANT**: After running the command below, you'll see a LONG work package prompt (~1000+ lines). - -**You MUST scroll to the BOTTOM** to see the completion commands! - -Run this command to get the work package prompt and review instructions: - -```bash -spec-kitty agent workflow review $ARGUMENTS --agent -``` - -**CRITICAL**: You MUST provide `--agent ` to track who is reviewing! 
- -If no WP ID is provided, it will automatically find the first work package with `lane: "for_review"` and move it to "doing" for you. - -## Dependency checks (required) - -- dependency_check: If the WP frontmatter lists `dependencies`, confirm each dependency WP is merged to main before you review this WP. -- dependent_check: Identify any WPs that list this WP as a dependency and note their current lanes. -- rebase_warning: If you request changes AND any dependents exist, warn those agents to rebase and provide a concrete command (example: `cd .worktrees/FEATURE-WP02 && git rebase FEATURE-WP01`). -- verify_instruction: Confirm dependency declarations match actual code coupling (imports, shared modules, API contracts). - -**After reviewing, scroll to the bottom and run ONE of these commands**: -- ✅ Approve: `spec-kitty agent tasks move-task WP## --to done --note "Review passed: "` -- ❌ Reject: Write feedback to the temp file path shown in the prompt, then run `spec-kitty agent tasks move-task WP## --to planned --review-feedback-file ` - -**The prompt will provide a unique temp file path for feedback - use that exact path to avoid conflicts with other agents!** - -**The Python script handles all file updates automatically - no manual editing required!** diff --git a/.kilocode/workflows/spec-kitty.specify.md b/.kilocode/workflows/spec-kitty.specify.md deleted file mode 100644 index cc2735849c..0000000000 --- a/.kilocode/workflows/spec-kitty.specify.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -description: Create or update the feature specification from a natural language feature description. ---- - - -# /spec-kitty.specify - Create Feature Specification - -**Version**: 0.11.0+ - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Specify works in the planning repository. NO worktrees are created. 
- -```bash -# Run from project root: -cd /path/to/project/root # Your planning repository - -# All planning artifacts are created in the planning repo and committed: -# - kitty-specs/###-feature/spec.md → Created in planning repo -# - Committed to target branch (meta.json → target_branch) -# - NO worktrees created -``` - -**Worktrees are created later** during `/spec-kitty.implement`, not during planning. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Discovery Gate (mandatory) - -Before running any scripts or writing to disk you **must** conduct a structured discovery interview. - -- **Scope proportionality (CRITICAL)**: FIRST, gauge the inherent complexity of the request: - - **Trivial/Test Features** (hello world, simple pages, proof-of-concept): Ask 1-2 questions maximum, then proceed. Examples: "a simple hello world page", "tic-tac-toe game", "basic contact form" - - **Simple Features** (small UI additions, minor enhancements): Ask 2-3 questions covering purpose and basic constraints - - **Complex Features** (new subsystems, integrations): Ask 3-5 questions covering goals, users, constraints, risks - - **Platform/Critical Features** (authentication, payments, infrastructure): Full discovery with 5+ questions - -- **User signals to reduce questioning**: If the user says "just testing", "quick prototype", "skip to next phase", "stop asking questions" - recognize this as a signal to minimize discovery and proceed with reasonable defaults. - -- **First response rule**: - - For TRIVIAL features (hello world, simple test): Ask ONE clarifying question, then if the answer confirms it's simple, proceed directly to spec generation - - For other features: Ask a single focused discovery question and end with `WAITING_FOR_DISCOVERY_INPUT` - -- If the user provides no initial description (empty command), stay in **Interactive Interview Mode**: keep probing with one question at a time. 
- -- **Conversational cadence**: After each user reply, decide if you have ENOUGH context for this feature's complexity level. For trivial features, 1-2 questions is sufficient. Only continue asking if truly necessary for the scope. - -Discovery requirements (scale to feature complexity): - -1. Maintain a **Discovery Questions** table internally covering questions appropriate to the feature's complexity (1-2 for trivial, up to 5+ for complex). Track columns `#`, `Question`, `Why it matters`, and `Current insight`. Do **not** render this table to the user. -2. For trivial features, reasonable defaults are acceptable. Only probe if truly ambiguous. -3. When you have sufficient context for the feature's scope, paraphrase into an **Intent Summary** and confirm. For trivial features, this can be very brief. -4. If user explicitly asks to skip questions or says "just testing", acknowledge and proceed with minimal discovery. - -## Mission Selection - -After completing discovery and confirming the Intent Summary, determine the appropriate mission for this feature. - -### Available Missions - -- **software-dev**: For building software features, APIs, CLI tools, applications - - Phases: research → design → implement → test → review - - Best for: code changes, new features, bug fixes, refactoring - -- **research**: For investigations, literature reviews, technical analysis - - Phases: question → methodology → gather → analyze → synthesize → publish - - Best for: feasibility studies, market research, technology evaluation - -### Mission Inference - -1. **Analyze the feature description** to identify the primary goal: - - Building, coding, implementing, creating software → **software-dev** - - Researching, investigating, analyzing, evaluating → **research** - -2. 
**Check for explicit mission requests** in the user's description: - - If user mentions "research project", "investigation", "analysis" → use research - - If user mentions "build", "implement", "create feature" → use software-dev - -3. **Confirm with user** (unless explicit): - > "Based on your description, this sounds like a **[software-dev/research]** project. - > I'll use the **[mission name]** mission. Does that work for you?" - -4. **Handle user response**: - - If confirmed: proceed with selected mission - - If user wants different mission: use their choice - -5. **Handle --mission flag**: If the user provides `--mission ` in their command, skip inference and use the specified mission directly. - -Store the final mission selection in your notes and include it in the spec output. Do not pass a `--mission` flag to feature creation. - -## Workflow (0.11.0+) - -**Planning happens in the planning repository - NO worktree created!** - -1. Creates `kitty-specs/###-feature/spec.md` directly in planning repo -2. Automatically commits to target branch -3. No worktree created during specify - -**Worktrees created later**: Use `spec-kitty implement WP##` to create a workspace for each work package. Worktrees are created later during implement (e.g., `.worktrees/###-feature-WP##`). - -## Location - -- Work in: **Planning repository** (not a worktree) -- Creates: `kitty-specs/###-feature/spec.md` -- Commits to: target branch (`meta.json` → `target_branch`) - -## Outline - -### 0. Generate a Friendly Feature Title - -- Summarize the agreed intent into a short, descriptive title (aim for ≤7 words; avoid filler like "feature" or "thing"). -- Read that title back during the Intent Summary and revise it if the user requests changes. -- Use the confirmed title to derive the kebab-case feature slug for the create-feature command. - -The text the user typed after `/spec-kitty.specify` in the triggering message **is** the initial feature description. 
Capture it verbatim, but treat it only as a starting point for discovery—not the final truth. Your job is to interrogate the request, surface gaps, and co-create a complete specification with the user. - -Given that feature description, do this: - -- **Generation Mode (arguments provided)**: Use the provided text as a starting point, validate it through discovery, and fill gaps with explicit questions or clearly documented assumptions (limit `[NEEDS CLARIFICATION: …]` to at most three critical decisions the user has postponed). -- **Interactive Interview Mode (no arguments)**: Use the discovery interview to elicit all necessary context, synthesize the working feature description, and confirm it with the user before you generate any specification artifacts. - -1. **Check discovery status**: - - If this is your first message or discovery questions remain unanswered, stay in the one-question loop, capture the user's response, update your internal table, and end with `WAITING_FOR_DISCOVERY_INPUT`. Do **not** surface the table; keep it internal. Do **not** call the creation command yet. - - Only proceed once every discovery question has an explicit answer and the user has acknowledged the Intent Summary. - - Empty invocation rule: stay in interview mode until you can restate the agreed-upon feature description. Do **not** call the creation command while the description is missing or provisional. - -2. When discovery is complete and the intent summary, **title**, and **mission** are confirmed, run the feature creation command from repo root: - - ```bash - spec-kitty agent feature create-feature "" --json - ``` - - Where `` is a kebab-case version of the friendly title (e.g., "Checkout Upsell Flow" → "checkout-upsell-flow"). 
- - The command returns JSON with: - - `result`: "success" or error message - - `feature`: Feature number and slug (e.g., "014-checkout-upsell-flow") - - `feature_dir`: Absolute path to the feature directory inside the main repo - - Parse these values for use in subsequent steps. All file paths are absolute. - - **IMPORTANT**: You must only ever run this command once. The JSON is provided in the terminal output - always refer to it to get the actual paths you're looking for. -3. **Stay in the main repository**: No worktree is created during specify. - -4. The spec template is bundled with spec-kitty at `src/specify_cli/missions/software-dev/.kittify/templates/spec-template.md`. The template defines required sections for software development features. - -5. Create meta.json in the feature directory with: - ```json - { - "feature_number": "", - "slug": "", - "friendly_name": "", - "mission": "", - "source_description": "$ARGUMENTS", - "created_at": "", - "target_branch": "main", - "vcs": "git" - } - ``` - - **CRITICAL**: Always set these fields explicitly: - - `target_branch`: Set to "main" by default (user can change to "2.x" for dual-branch features) - - `vcs`: Set to "git" by default (enables VCS locking and prevents jj fallback) - -6. 
Generate the specification content by following this flow: - - Use the discovery answers as your authoritative source of truth (do **not** rely on raw `$ARGUMENTS`) - - For empty invocations, treat the synthesized interview summary as the canonical feature description - - Identify: actors, actions, data, constraints, motivations, success metrics - - For any remaining ambiguity: - * Ask the user a focused follow-up question immediately and halt work until they answer - * Only use `[NEEDS CLARIFICATION: …]` when the user explicitly defers the decision - * Record any interim assumption in the Assumptions section - * Prioritize clarifications by impact: scope > outcomes > risks/security > user experience > technical details - - Fill User Scenarios & Testing section (ERROR if no clear user flow can be determined) - - Generate Functional Requirements (each requirement must be testable) - - Define Success Criteria (measurable, technology-agnostic outcomes) - - Identify Key Entities (if data involved) - -7. Write the specification to `/spec.md` using the template structure, replacing placeholders with concrete details derived from the feature description while preserving section order and headings. - -8. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria: - - a. 
**Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items: - - ```markdown - # Specification Quality Checklist: [FEATURE NAME] - - **Purpose**: Validate specification completeness and quality before proceeding to planning - **Created**: [DATE] - **Feature**: [Link to spec.md] - - ## Content Quality - - - [ ] No implementation details (languages, frameworks, APIs) - - [ ] Focused on user value and business needs - - [ ] Written for non-technical stakeholders - - [ ] All mandatory sections completed - - ## Requirement Completeness - - - [ ] No [NEEDS CLARIFICATION] markers remain - - [ ] Requirements are testable and unambiguous - - [ ] Success criteria are measurable - - [ ] Success criteria are technology-agnostic (no implementation details) - - [ ] All acceptance scenarios are defined - - [ ] Edge cases are identified - - [ ] Scope is clearly bounded - - [ ] Dependencies and assumptions identified - - ## Feature Readiness - - - [ ] All functional requirements have clear acceptance criteria - - [ ] User scenarios cover primary flows - - [ ] Feature meets measurable outcomes defined in Success Criteria - - [ ] No implementation details leak into specification - - ## Notes - - - Items marked incomplete require spec updates before `/spec-kitty.clarify` or `/spec-kitty.plan` - ``` - - b. **Run Validation Check**: Review the spec against each checklist item: - - For each item, determine if it passes or fails - - Document specific issues found (quote relevant spec sections) - - c. **Handle Validation Results**: - - - **If all items pass**: Mark checklist complete and proceed to step 6 - - - **If items fail (excluding [NEEDS CLARIFICATION])**: - 1. List the failing items and specific issues - 2. Update the spec to address each issue - 3. Re-run validation until all items pass (max 3 iterations) - 4. 
If still failing after 3 iterations, document remaining issues in checklist notes and warn user - - - **If [NEEDS CLARIFICATION] markers remain**: - 1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec - 2. Re-confirm with the user whether each outstanding decision truly needs to stay unresolved. Do not assume away critical gaps. - 3. For each clarification the user has explicitly deferred, present options using plain text—no tables: - - ``` - Question [N]: [Topic] - Context: [Quote relevant spec section] - Need: [Specific question from NEEDS CLARIFICATION marker] - Options: (A) [First answer — implications] · (B) [Second answer — implications] · (C) [Third answer — implications] · (D) Custom (describe your own answer) - Reply with a letter or a custom answer. - ``` - - 4. Number questions sequentially (Q1, Q2, Q3 - max 3 total) - 5. Present all questions together before waiting for responses - 6. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B") - 7. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer - 9. Re-run validation after all clarifications are resolved - - d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status - -9. Report completion with feature directory, spec file path, checklist results, and readiness for the next phase (`/spec-kitty.clarify` or `/spec-kitty.plan`). - -**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing. - -## General Guidelines - -## Quick Guidelines - -- Focus on **WHAT** users need and **WHY**. -- Avoid HOW to implement (no tech stack, APIs, code structure). -- Written for business stakeholders, not developers. -- DO NOT create any checklists that are embedded in the spec. That will be a separate command. 
- -### Section Requirements - -- **Mandatory sections**: Must be completed for every feature -- **Optional sections**: Include only when relevant to the feature -- When a section doesn't apply, remove it entirely (don't leave as "N/A") - -### For AI Generation - -When creating this spec from a user prompt: - -1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps -2. **Document assumptions**: Record reasonable defaults in the Assumptions section -3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that: - - Significantly impact feature scope or user experience - - Have multiple reasonable interpretations with different implications - - Lack any reasonable default -4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details -5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item -6. **Common areas needing clarification** (only if no reasonable default exists): - - Feature scope and boundaries (include/exclude specific use cases) - - User types and permissions (if multiple conflicting interpretations possible) - - Security/compliance requirements (when legally/financially significant) - -**Examples of reasonable defaults** (don't ask about these): - -- Data retention: Industry-standard practices for the domain -- Performance targets: Standard web/mobile app expectations unless specified -- Error handling: User-friendly messages with appropriate fallbacks -- Authentication method: Standard session-based or OAuth2 for web apps -- Integration patterns: RESTful APIs unless specified otherwise - -### Success Criteria Guidelines - -Success criteria must be: - -1. **Measurable**: Include specific metrics (time, percentage, count, rate) -2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools -3. 
**User-focused**: Describe outcomes from user/business perspective, not system internals -4. **Verifiable**: Can be tested/validated without knowing implementation details - -**Good examples**: - -- "Users can complete checkout in under 3 minutes" -- "System supports 10,000 concurrent users" -- "95% of searches return results in under 1 second" -- "Task completion rate improves by 40%" - -**Bad examples** (implementation-focused): - -- "API response time is under 200ms" (too technical, use "Users see results instantly") -- "Database can handle 1000 TPS" (implementation detail, use user-facing metric) -- "React components render efficiently" (framework-specific) -- "Redis cache hit rate above 80%" (technology-specific) diff --git a/.kilocode/workflows/spec-kitty.status.md b/.kilocode/workflows/spec-kitty.status.md deleted file mode 100644 index 8776b1ca64..0000000000 --- a/.kilocode/workflows/spec-kitty.status.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: Display kanban board status showing work package progress across lanes (planned/doing/for_review/done). ---- - - -## Status Board - -Show the current status of all work packages in the active feature. 
This displays: -- Kanban board with WPs organized by lane -- Progress bar showing completion percentage -- Parallelization opportunities (which WPs can run concurrently) -- Next steps recommendations - -## When to Use - -- Before starting work (see what's ready to implement) -- During implementation (track overall progress) -- After completing a WP (see what's next) -- When planning parallelization (identify independent WPs) - -## Implementation - -Run the CLI command to display the status board: - -```bash -spec-kitty agent tasks status -``` - -To specify a feature explicitly: - -```bash -spec-kitty agent tasks status --feature 012-documentation-mission -``` - -The command displays a rich kanban board with: -- Progress bar showing completion percentage -- Work packages organized by lane (planned/doing/for_review/done) -- Summary metrics - -## Alternative: Python API - -For programmatic access (e.g., in Jupyter notebooks or scripts), use the Python function: - -```python -from specify_cli.agent_utils.status import show_kanban_status - -# Auto-detect feature from current directory/branch -result = show_kanban_status() - -# Or specify feature explicitly: -# result = show_kanban_status("012-documentation-mission") -``` - -Returns structured data: - -```python -{ - 'feature_slug': '012-documentation-mission', - 'progress_percentage': 80.0, - 'done_count': 8, - 'total_wps': 10, - 'by_lane': { - 'planned': ['WP09'], - 'doing': ['WP10'], - 'for_review': [], - 'done': ['WP01', 'WP02', ...] - }, - 'parallelization': { - 'ready_wps': [...], - 'can_parallelize': True/False, - 'parallel_groups': [...] 
- } -} - -## Output Example - -``` -╭─────────────────────────────────────────────────────────────────────╮ -│ 012-documentation-mission │ -│ Progress: 80% [████████░░] │ -╰─────────────────────────────────────────────────────────────────────╯ - -┌─────────────┬─────────────┬─────────────┬─────────────┐ -│ PLANNED │ DOING │ FOR_REVIEW │ DONE │ -├─────────────┼─────────────┼─────────────┼─────────────┤ -│ WP09 │ WP10 │ │ WP01 │ -│ │ │ │ WP02 │ -│ │ │ │ WP03 │ -│ │ │ │ ... │ -└─────────────┴─────────────┴─────────────┴─────────────┘ - -🔀 Parallelization: WP09 can start (no dependencies) -``` diff --git a/.kilocode/workflows/spec-kitty.tasks.md b/.kilocode/workflows/spec-kitty.tasks.md deleted file mode 100644 index e170ee580e..0000000000 --- a/.kilocode/workflows/spec-kitty.tasks.md +++ /dev/null @@ -1,577 +0,0 @@ ---- -description: Generate grouped work packages with actionable subtasks and matching prompt files for the feature in one pass. ---- - - -# /spec-kitty.tasks - Generate Work Packages - -**Version**: 0.11.0+ - -## ⚠️ CRITICAL: THIS IS THE MOST IMPORTANT PLANNING WORK - -**You are creating the blueprint for implementation**. The quality of work packages determines: -- How easily agents can implement the feature -- How parallelizable the work is -- How reviewable the code will be -- Whether the feature succeeds or fails - -**QUALITY OVER SPEED**: This is NOT the time to save tokens or rush. Take your time to: -- Understand the full scope deeply -- Break work into clear, manageable pieces -- Write detailed, actionable guidance -- Think through risks and edge cases - -**Token usage is EXPECTED and GOOD here**. A thorough task breakdown saves 10x the effort during implementation. Do not cut corners. - ---- - -## 📍 WORKING DIRECTORY: Stay in planning repository - -**IMPORTANT**: Tasks works in the planning repository. NO worktrees created. 
- -```bash -# Run from project root (same directory as /spec-kitty.plan): -# You should already be here if you just ran /spec-kitty.plan - -# Creates: -# - kitty-specs/###-feature/tasks/WP01-*.md → In planning repository -# - kitty-specs/###-feature/tasks/WP02-*.md → In planning repository -# - Commits ALL to target branch -# - NO worktrees created -``` - -**Do NOT cd anywhere**. Stay in the planning repository root. - -**Worktrees created later**: After tasks are generated, use `spec-kitty implement WP##` to create workspace for each WP. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Check (0.11.0+) - -Before proceeding, verify you are in the planning repository: - -**Check your current branch:** -```bash -git branch --show-current -``` - -**Expected output:** the target branch (meta.json → target_branch), typically `main` or `2.x` -**If you see a feature branch:** You're in the wrong place. Return to the target branch: -```bash -cd $(git rev-parse --show-toplevel) -git checkout -``` - -Work packages are generated directly in `kitty-specs/###-feature/` and committed to the target branch. Worktrees are created later when implementing each work package. - -## Outline - -1. **Setup**: Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` from the repository root and capture `FEATURE_DIR` plus `AVAILABLE_DOCS`. All paths must be absolute. - - **CRITICAL**: The command returns JSON with `FEATURE_DIR` as an ABSOLUTE path (e.g., `/Users/robert/Code/new_specify/kitty-specs/001-feature-name`). - - **YOU MUST USE THIS PATH** for ALL subsequent file operations. 
Example: - ``` - FEATURE_DIR = "/Users/robert/Code/new_specify/kitty-specs/001-a-simple-hello" - tasks.md location: FEATURE_DIR + "/tasks.md" - prompt location: FEATURE_DIR + "/tasks/WP01-slug.md" - ``` - - **DO NOT CREATE** paths like: - - ❌ `tasks/WP01-slug.md` (missing FEATURE_DIR prefix) - - ❌ `/tasks/WP01-slug.md` (wrong root) - - ❌ `FEATURE_DIR/tasks/planned/WP01-slug.md` (WRONG - no subdirectories!) - - ❌ `WP01-slug.md` (wrong directory) - -2. **Load design documents** from `FEATURE_DIR` (only those present): - - **Required**: plan.md (tech architecture, stack), spec.md (user stories & priorities) - - **Optional**: data-model.md (entities), contracts/ (API schemas), research.md (decisions), quickstart.md (validation scenarios) - - Scale your effort to the feature: simple UI tweaks deserve lighter coverage, multi-system releases require deeper decomposition. - -3. **Derive fine-grained subtasks** (IDs `T001`, `T002`, ...): - - Parse plan/spec to enumerate concrete implementation steps, tests (only if explicitly requested), migrations, and operational work. - - Capture prerequisites, dependencies, and parallelizability markers (`[P]` means safe to parallelize per file/concern). - - Maintain the subtask list internally; it feeds the work-package roll-up and the prompts. - -4. 
**Roll subtasks into work packages** (IDs `WP01`, `WP02`, ...): - - **IDEAL WORK PACKAGE SIZE** (most important guideline): - - **Target: 3-7 subtasks per WP** (results in 200-500 line prompts) - - **Maximum: 10 subtasks per WP** (results in ~700 line prompts) - - **If more than 10 subtasks needed**: Create additional WPs, don't pack them in - - **WHY SIZE MATTERS**: - - **Too large** (>10 subtasks, >700 lines): Agents get overwhelmed, skip details, make mistakes - - **Too small** (<3 subtasks, <150 lines): Overhead of worktree creation not worth it - - **Just right** (3-7 subtasks, 200-500 lines): Agent can hold entire context, implements thoroughly - - **NUMBER OF WPs**: Let the work dictate the count - - Simple feature (5-10 subtasks total): 2-3 WPs - - Medium feature (20-40 subtasks): 5-8 WPs - - Complex feature (50+ subtasks): 10-20 WPs ← **This is OK!** - - **Better to have 20 focused WPs than 5 overwhelming WPs** - - **GROUPING PRINCIPLES**: - - Each WP should be independently implementable - - Root in a single user story or cohesive subsystem - - Ensure every subtask appears in exactly one work package - - Name with succinct goal (e.g., "User Story 1 – Real-time chat happy path") - - Record metadata: priority, success criteria, risks, dependencies, included subtasks - -5. **Write `tasks.md`** using the bundled tasks template (`src/specify_cli/missions/software-dev/.kittify/templates/tasks-template.md`): - - **Location**: Write to `FEATURE_DIR/tasks.md` (use the absolute FEATURE_DIR path from step 1) - - Populate the Work Package sections (setup, foundational, per-story, polish) with the `WPxx` entries - - Under each work package include: - - Summary (goal, priority, independent test) - - Included subtasks (checkbox list referencing `Txxx`) - - Implementation sketch (high-level sequence) - - Parallel opportunities, dependencies, and risks - - Preserve the checklist style so implementers can mark progress - -6. 
**Generate prompt files (one per work package)**: - - **CRITICAL PATH RULE**: All work package files MUST be created in a FLAT `FEATURE_DIR/tasks/` directory, NOT in subdirectories! - - Correct structure: `FEATURE_DIR/tasks/WPxx-slug.md` (flat, no subdirectories) - - WRONG (do not create): `FEATURE_DIR/tasks/planned/`, `FEATURE_DIR/tasks/doing/`, or ANY lane subdirectories - - WRONG (do not create): `/tasks/`, `tasks/`, or any path not under FEATURE_DIR - - Ensure `FEATURE_DIR/tasks/` exists (create as flat directory, NO subdirectories) - - For each work package: - - Derive a kebab-case slug from the title; filename: `WPxx-slug.md` - - Full path example: `FEATURE_DIR/tasks/WP01-create-html-page.md` (use ABSOLUTE path from FEATURE_DIR variable) - - Use the bundled task prompt template (`src/specify_cli/missions/software-dev/.kittify/templates/task-prompt-template.md`) to capture: - - Frontmatter with `work_package_id`, `subtasks` array, `lane: "planned"`, `dependencies`, history entry - - Objective, context, detailed guidance per subtask - - Test strategy (only if requested) - - Definition of Done, risks, reviewer guidance - - Update `tasks.md` to reference the prompt filename - - **TARGET PROMPT SIZE**: 200-500 lines per WP (results from 3-7 subtasks) - - **MAXIMUM PROMPT SIZE**: 700 lines per WP (10 subtasks max) - - **If prompts are >700 lines**: Split the WP - it's too large - - **IMPORTANT**: All WP files live in flat `tasks/` directory. Lane status is tracked ONLY in the `lane:` frontmatter field, NOT by directory location. Agents can change lanes by editing the `lane:` field directly or using `spec-kitty agent tasks move-task`. - -7. 
**Finalize tasks with dependency parsing and commit**: - After generating all WP prompt files, run the finalization command to: - - Parse dependencies from tasks.md - - Update WP frontmatter with dependencies field - - Validate dependencies (check for cycles, invalid references) - - Commit all tasks to target branch - - **CRITICAL**: Run this command from repo root: - ```bash - spec-kitty agent feature finalize-tasks --json - ``` - - This step is MANDATORY for workspace-per-WP features. Without it: - - Dependencies won't be in frontmatter - - Agents won't know which --base flag to use - - Tasks won't be committed to target branch - - **IMPORTANT - DO NOT COMMIT AGAIN AFTER THIS COMMAND**: - - finalize-tasks COMMITS the files automatically - - JSON output includes "commit_created": true/false and "commit_hash" - - If commit_created=true, files are ALREADY committed - do not run git commit again - - Other dirty files shown by 'git status' (templates, config) are UNRELATED - - Verify using the commit_hash from JSON output, not by running git add/commit again - -8. **Report**: Provide a concise outcome summary: - - Path to `tasks.md` - - Work package count and per-package subtask tallies - - **Average prompt size** (estimate lines per WP) - - **Validation**: Flag if any WP has >10 subtasks or >700 estimated lines - - Parallelization highlights - - MVP scope recommendation (usually Work Package 1) - - Prompt generation stats (files written, directory structure, any skipped items with rationale) - - Finalization status (dependencies parsed, X WP files updated, committed to target branch) - - Next suggested command (e.g., `/spec-kitty.analyze` or `/spec-kitty.implement`) - -Context for work-package planning: $ARGUMENTS - -The combination of `tasks.md` and the bundled prompt files must enable a new engineer to pick up any work package and deliver it end-to-end without further specification spelunking. 
- -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. - -## Work Package Sizing Guidelines (CRITICAL) - -### Ideal WP Size - -**Target: 3-7 subtasks per WP** -- Results in 200-500 line prompt files -- Agent can hold entire context in working memory -- Clear scope - easy to review -- Parallelizable - multiple agents can work simultaneously - -**Examples of well-sized WPs**: -- WP01: Foundation Setup (5 subtasks, ~300 lines) - - T001: Create database schema - - T002: Set up migration system - - T003: Create base models - - T004: Add validation layer - - T005: Write foundation tests - -- WP02: User Authentication (6 subtasks, ~400 lines) - - T006: Implement login endpoint - - T007: Implement logout endpoint - - T008: Add session management - - T009: Add password reset flow - - T010: Write auth tests - - T011: Add rate limiting - -### Maximum WP Size - -**Hard limit: 10 subtasks, ~700 lines** -- Beyond this, agents start making mistakes -- Prompts become overwhelming -- Reviews take too long -- Integration risk increases - -**If you need more than 10 subtasks**: SPLIT into multiple WPs. - -### Number of WPs: No Arbitrary Limit - -**DO NOT limit based on WP count. 
Limit based on SIZE.** - -- ✅ **20 WPs of 5 subtasks each** = 100 subtasks, manageable prompts -- ❌ **5 WPs of 20 subtasks each** = 100 subtasks, overwhelming 1400-line prompts - -**Feature complexity scales with subtask count, not WP count**: -- Simple feature: 10-15 subtasks → 2-4 WPs -- Medium feature: 30-50 subtasks → 6-10 WPs -- Complex feature: 80-120 subtasks → 15-20 WPs ← **Totally fine!** -- Very complex: 150+ subtasks → 25-30 WPs ← **Also fine!** - -**The goal is manageable WP size, not minimizing WP count.** - -### When to Split a WP - -**Split if ANY of these are true**: -- More than 10 subtasks -- Prompt would exceed 700 lines -- Multiple independent concerns mixed together -- Different phases or priorities mixed -- Agent would need to switch contexts multiple times - -**How to split**: -- By phase: Foundation WP01, Implementation WP02, Testing WP03 -- By component: Database WP01, API WP02, UI WP03 -- By user story: Story 1 WP01, Story 2 WP02, Story 3 WP03 -- By type of work: Code WP01, Tests WP02, Migration WP03, Docs WP04 - -### When to Merge WPs - -**Merge if ALL of these are true**: -- Each WP has <3 subtasks -- Combined would be <7 subtasks -- Both address the same concern/component -- No natural parallelization opportunity -- Implementation is highly coupled - -**Don't merge just to hit a WP count target!** - -## Task Generation Rules - -**Tests remain optional**. Only include testing tasks/steps if the feature spec or user explicitly demands them. - -1. **Subtask derivation**: - - Assign IDs `Txxx` sequentially in execution order. - - Use `[P]` for parallel-safe items (different files/components). - - Include migrations, data seeding, observability, and operational chores. - - **Ideal subtask granularity**: One clear action (e.g., "Create user model", "Add login endpoint") - - **Too granular**: "Add import statement", "Fix typo" (bundle these) - - **Too coarse**: "Build entire API" (split into endpoints) - -2. 
**Work package grouping**: - - **Focus on SIZE first, count second** - - Target 3-7 subtasks per WP (200-500 line prompts) - - Maximum 10 subtasks per WP (700 line prompts) - - Keep each work package laser-focused on a single goal - - Avoid mixing unrelated concerns - - **Let complexity dictate WP count**: 20+ WPs is fine for complex features - -3. **Prioritisation & dependencies**: - - Sequence work packages: setup → foundational → story phases (priority order) → polish. - - Call out inter-package dependencies explicitly in both `tasks.md` and the prompts. - - Front-load infrastructure/foundation WPs (enable parallelization) - -4. **Prompt composition**: - - Mirror subtask order inside the prompt. - - Provide actionable implementation and test guidance per subtask—short for trivial work, exhaustive for complex flows. - - **Aim for 30-70 lines per subtask** in the prompt (includes purpose, steps, files, validation) - - Surface risks, integration points, and acceptance gates clearly so reviewers know what to verify. - - Include examples where helpful (API request/response shapes, config file structures, test cases) - -5. **Quality checkpoints**: - - After drafting WPs, review each prompt size estimate - - If any WP >700 lines: **STOP and split it** - - If most WPs <200 lines: Consider merging related ones - - Aim for consistency: Most WPs should be similar size (within 200-line range) - - **Think like an implementer**: Can I complete this WP in one focused session? If not, it's too big. - -6. **Think like a reviewer**: Any vague requirement should be tightened until a reviewer can objectively mark it done or not done. - -## Step-by-Step Process - -### Step 1: Setup - -Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` and capture `FEATURE_DIR`. 
- -### Step 2: Load Design Documents - -Read from `FEATURE_DIR`: -- spec.md (required) -- plan.md (required) -- data-model.md (optional) -- research.md (optional) -- contracts/ (optional) - -### Step 3: Derive ALL Subtasks - -Create complete list of subtasks with IDs T001, T002, etc. - -**Don't worry about count yet - capture EVERYTHING needed.** - -### Step 4: Group into Work Packages - -**SIZING ALGORITHM**: - -``` -For each cohesive unit of work: - 1. List related subtasks - 2. Count subtasks - 3. Estimate prompt lines (subtasks × 50 lines avg) - - If subtasks <= 7 AND estimated lines <= 500: - ✓ Good WP size - create it - - Else if subtasks > 10 OR estimated lines > 700: - ✗ Too large - split into 2+ WPs - - Else if subtasks < 3 AND can merge with related WP: - → Consider merging (but don't force it) -``` - -**Examples**: - -**Good sizing**: -- WP01: Database Foundation (5 subtasks, ~300 lines) ✓ -- WP02: User Authentication (7 subtasks, ~450 lines) ✓ -- WP03: Admin Dashboard (6 subtasks, ~400 lines) ✓ - -**Too large - MUST SPLIT**: -- ❌ WP01: Entire Backend (25 subtasks, ~1500 lines) - - ✓ Split into: DB Layer (5), Business Logic (6), API Layer (7), Auth (7) - -**Too small - CONSIDER MERGING**: -- WP01: Add config file (2 subtasks, ~100 lines) -- WP02: Add logging (2 subtasks, ~120 lines) - - ✓ Merge into: WP01: Infrastructure Setup (4 subtasks, ~220 lines) - -### Step 5: Write tasks.md - -Create work package sections with: -- Summary (goal, priority, test criteria) -- Included subtasks (checkbox list) -- Implementation notes -- Parallel opportunities -- Dependencies -- **Estimated prompt size** (e.g., "~400 lines") - -### Step 6: Generate WP Prompt Files - -For each WP, generate `FEATURE_DIR/tasks/WPxx-slug.md` using the template. - -**CRITICAL VALIDATION**: After generating each prompt: -1. Count lines in the prompt -2. If >700 lines: GO BACK and split the WP -3. 
If >1000 lines: **STOP - this will fail** - you MUST split it - -**Self-check**: -- Subtask count: 3-7? ✓ | 8-10? ⚠️ | 11+? ❌ SPLIT -- Estimated lines: 200-500? ✓ | 500-700? ⚠️ | 700+? ❌ SPLIT -- Can implement in one session? ✓ | Multiple sessions needed? ❌ SPLIT - -### Step 7: Finalize Tasks - -Run `spec-kitty agent feature finalize-tasks --json` to: -- Parse dependencies -- Update frontmatter -- Validate (cycles, invalid refs) -- Commit to target branch - -**DO NOT run git commit after this** - finalize-tasks commits automatically. -Check JSON output for "commit_created": true and "commit_hash" to verify. - -### Step 8: Report - -Provide summary with: -- WP count and subtask tallies -- **Size distribution** (e.g., "6 WPs ranging from 250-480 lines") -- **Size validation** (e.g., "✓ All WPs within ideal range" OR "⚠️ WP05 is 820 lines - consider splitting") -- Parallelization opportunities -- MVP scope -- Next command - -## Dependency Detection (0.11.0+) - -**Parse dependencies from tasks.md structure**: - -The LLM should analyze tasks.md for dependency relationships: -- Explicit phrases: "Depends on WP##", "Dependencies: WP##" -- Phase grouping: Phase 2 WPs typically depend on Phase 1 -- Default to empty if unclear - -**Generate dependencies in WP frontmatter**: - -Each WP prompt file MUST include a `dependencies` field: -```yaml ---- -work_package_id: "WP02" -title: "Build API" -lane: "planned" -dependencies: ["WP01"] # Generated from tasks.md -subtasks: ["T001", "T002"] ---- -``` - -**Include the correct implementation command**: -- No dependencies: `spec-kitty implement WP01` -- With dependencies: `spec-kitty implement WP02 --base WP01` - -The WP prompt must show the correct command so agents don't branch from the wrong base. 
- -## ⚠️ Common Mistakes to Avoid - -### ❌ MISTAKE 1: Optimizing for WP Count - -**Bad thinking**: "I'll create exactly 5-7 WPs to keep it manageable" -→ Results in: 20 subtasks per WP, 1200-line prompts, overwhelmed agents - -**Good thinking**: "Each WP should be 3-7 subtasks (200-500 lines). If that means 15 WPs, that's fine." -→ Results in: Focused WPs, successful implementation, happy agents - -### ❌ MISTAKE 2: Token Conservation During Planning - -**Bad thinking**: "I'll save tokens by writing brief prompts with minimal guidance" -→ Results in: Agents confused during implementation, asking clarifying questions, doing work wrong, requiring rework - -**Good thinking**: "I'll invest tokens now to write thorough prompts with examples and edge cases" -→ Results in: Agents implement correctly the first time, no rework needed, net token savings - -### ❌ MISTAKE 3: Mixing Unrelated Concerns - -**Bad example**: WP03: Misc Backend Work (12 subtasks) -- T010: Add user model -- T011: Configure logging -- T012: Set up email service -- T013: Add admin dashboard -- ... (8 more unrelated tasks) - -**Good approach**: Split by concern -- WP03: User Management (T010-T013, 4 subtasks) -- WP04: Infrastructure Services (T014-T017, 4 subtasks) -- WP05: Admin Dashboard (T018-T021, 4 subtasks) - -### ❌ MISTAKE 4: Insufficient Prompt Detail - -**Bad prompt** (~20 lines per subtask): -```markdown -### Subtask T001: Add user authentication - -**Purpose**: Implement login - -**Steps**: -1. Create endpoint -2. Add validation -3. Test it -``` - -**Good prompt** (~60 lines per subtask): -```markdown -### Subtask T001: Implement User Login Endpoint - -**Purpose**: Create POST /api/auth/login endpoint that validates credentials and returns JWT token. - -**Steps**: -1. 
Create endpoint handler in `src/api/auth.py`: - - Route: POST /api/auth/login - - Request body: `{email: string, password: string}` - - Response: `{token: string, user: UserProfile}` on success - - Error codes: 400 (invalid input), 401 (bad credentials), 429 (rate limited) - -2. Implement credential validation: - - Hash password with bcrypt (matches registration hash) - - Compare against stored hash from database - - Use constant-time comparison to prevent timing attacks - -3. Generate JWT token on success: - - Include: user_id, email, issued_at, expires_at (24 hours) - - Sign with SECRET_KEY from environment - - Algorithm: HS256 - -4. Add rate limiting: - - Max 5 attempts per IP per 15 minutes - - Return 429 with Retry-After header - -**Files**: -- `src/api/auth.py` (new file, ~80 lines) -- `tests/api/test_auth.py` (new file, ~120 lines) - -**Validation**: -- [ ] Valid credentials return 200 with token -- [ ] Invalid credentials return 401 -- [ ] Missing fields return 400 -- [ ] Rate limit enforced (test with 6 requests) -- [ ] JWT token is valid and contains correct claims -- [ ] Token expires after 24 hours - -**Edge Cases**: -- Account doesn't exist: Return 401 (same as wrong password - don't leak info) -- Empty password: Return 400 -- SQL injection in email field: Prevented by parameterized queries -- Concurrent login attempts: Handle with database locking -``` - -## Remember - -**This is the most important planning work you'll do.** - -A well-crafted set of work packages with detailed prompts makes implementation smooth and parallelizable. - -A rushed job with vague, oversized WPs causes: -- Agents getting stuck -- Implementation taking 2-3x longer -- Rework and review cycles -- Feature failure - -**Invest the tokens now. Be thorough. 
Future agents will thank you.** diff --git a/.kittify/.dashboard b/.kittify/.dashboard deleted file mode 100644 index 5ca685a761..0000000000 --- a/.kittify/.dashboard +++ /dev/null @@ -1,4 +0,0 @@ -http://127.0.0.1:9243 -9243 -7cdbc40f9431d8d2cc5d2e418e2d4346 -2216 diff --git a/.kittify/metadata.yaml b/.kittify/metadata.yaml deleted file mode 100644 index 4b1af3ec58..0000000000 --- a/.kittify/metadata.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Spec Kitty Project Metadata -# Auto-generated by spec-kitty init/upgrade -# DO NOT EDIT MANUALLY - -spec_kitty: - version: 0.14.2 - initialized_at: '2026-02-28T05:32:34.112867' - last_upgraded_at: null -environment: - python_version: 3.14.0 - platform: darwin - platform_version: macOS-26.0.1-arm64-arm-64bit-Mach-O -migrations: - applied: [] diff --git a/.kittify/missions/documentation/command-templates/implement.md b/.kittify/missions/documentation/command-templates/implement.md deleted file mode 100644 index fd54948f42..0000000000 --- a/.kittify/missions/documentation/command-templates/implement.md +++ /dev/null @@ -1,337 +0,0 @@ ---- -description: Implement documentation work packages using Divio templates and generators. ---- - -# Command Template: /spec-kitty.implement (Documentation Mission) - -**Phase**: Generate -**Purpose**: Create documentation from templates, invoke generators for reference docs, populate templates with content. - -## ⚠️ CRITICAL: Working Directory Requirement - -**After running `spec-kitty implement WP##`, you MUST:** - -1. **Run the cd command shown in the output** - e.g., `cd .worktrees/###-feature-WP##/` -2. **ALL file operations happen in this directory** - Read, Write, Edit tools must target files in the workspace -3. 
**NEVER write deliverable files to the main repository** - This is a critical workflow error - -**Why this matters:** -- Each WP has an isolated worktree with its own branch -- Changes in main repository will NOT be seen by reviewers looking at the WP worktree -- Writing to main instead of the workspace causes review failures and merge conflicts - -**Verify you're in the right directory:** -```bash -pwd -# Should show: /path/to/repo/.worktrees/###-feature-WP##/ -``` - ---- - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Implementation Workflow - -Documentation implementation follows the standard workspace-per-WP model: -- **Worktrees used** - Each WP has its own worktree with dedicated branch (same as code missions) -- **Templates populated** - Use Divio templates as starting point -- **Generators invoked** - Run JSDoc/Sphinx/rustdoc to create API reference -- **Content authored** - Write tutorial/how-to/explanation content in worktree -- **Quality validated** - Check accessibility, links, build before merging -- **Release prepared (optional)** - Draft `release.md` when publish is in scope - ---- - -## Per-Work-Package Implementation - -### For WP01: Structure & Generator Setup - -**Objective**: Create directory structure and configure doc generators. - -**Steps**: -1. Create docs/ directory structure: - ```bash - mkdir -p docs/{tutorials,how-to,reference/api,explanation} - ``` -2. Create index.md landing page: - ```markdown - # {Project Name} Documentation - - Welcome to the documentation for {Project Name}. - - ## Getting Started - - - [Tutorials](tutorials/) - Learn by doing - - [How-To Guides](how-to/) - Solve specific problems - - [Reference](reference/) - Technical specifications - - [Explanation](explanation/) - Understand concepts - ``` -3. 
Configure generators (per plan.md): - - For Sphinx: Create docs/conf.py from template - - For JSDoc: Create jsdoc.json from template - - For rustdoc: Update Cargo.toml with metadata -4. Create build script: - ```bash - #!/bin/bash - # build-docs.sh - - # Build Python docs with Sphinx - sphinx-build -b html docs/ docs/_build/html/ - - # Build JavaScript docs with JSDoc - npx jsdoc -c jsdoc.json - - # Build Rust docs - cargo doc --no-deps - - echo "Documentation built successfully!" - ``` -5. Test build: Run build script, verify no errors - -**Deliverables**: -- docs/ directory structure -- index.md landing page -- Generator configs (conf.py, jsdoc.json, Cargo.toml) -- build-docs.sh script -- Successful test build - ---- - -### For WP02-05: Content Creation (Tutorials, How-Tos, Reference, Explanation) - -**Objective**: Write documentation content using Divio templates. - -**Steps**: -1. **Select appropriate Divio template**: - - Tutorial: Use `templates/divio/tutorial-template.md` - - How-To: Use `templates/divio/howto-template.md` - - Reference: Use `templates/divio/reference-template.md` (for manual reference) - - Explanation: Use `templates/divio/explanation-template.md` - -2. **Copy template to docs/**: - ```bash - # Example for tutorial - cp templates/divio/tutorial-template.md docs/tutorials/getting-started.md - ``` - -3. **Fill in frontmatter**: - ```yaml - --- - type: tutorial - audience: "beginners" - purpose: "Learn how to get started with {Project}" - created: "2026-01-12" - estimated_time: "15 minutes" - prerequisites: "Python 3.11+, pip" - --- - ``` - -4. **Replace placeholders with content**: - - {Title} → Actual title - - [Description] → Actual description - - [Step actions] → Actual step-by-step instructions - - [Examples] → Real code examples - -5. 
**Follow Divio principles for this type**: - - **Tutorial**: Learning-oriented, step-by-step, show results at each step - - **How-To**: Goal-oriented, assume experience, solve specific problem - - **Reference**: Information-oriented, complete, consistent format - - **Explanation**: Understanding-oriented, conceptual, discuss alternatives - -6. **Add real examples and content**: - - Use actual project APIs, not placeholders - - Test all code examples (they must work!) - - Add real screenshots (with alt text) - - Use diverse example names (not just "John") - -7. **Validate against checklists**: - - Divio compliance (correct type characteristics?) - - Accessibility (heading hierarchy, alt text, clear language?) - - Inclusivity (diverse examples, neutral language?) - -**For Reference Documentation**: - -**Auto-Generated Reference** (API docs): -1. Ensure code has good doc comments: - - Python: Docstrings with Google/NumPy format - - JavaScript: JSDoc comments with @param, @returns - - Rust: /// doc comments -2. Run generator: - ```bash - # Sphinx (Python) - sphinx-build -b html docs/ docs/_build/html/ - - # JSDoc (JavaScript) - npx jsdoc -c jsdoc.json - - # rustdoc (Rust) - cargo doc --no-deps --document-private-items - ``` -3. Review generated output: - - Are all public APIs present? - - Are descriptions clear? - - Are examples included? - - Are links working? -4. If generated docs have gaps: - - Add/improve doc comments in source code - - Regenerate - - Or supplement with manual reference - -**Manual Reference** (CLI, config, data formats): -1. Use reference template -2. Document every option, every command, every field -3. Be consistent in format (use tables) -4. 
Include examples for each item - -**Deliverables**: -- Completed documentation files in docs/ -- All templates filled with real content -- All code examples tested and working -- All Divio type principles followed -- All accessibility/inclusivity checklists satisfied - ---- - -### For WP06: Quality Validation - -**Objective**: Validate documentation quality before considering complete. - -**Steps**: -1. **Automated checks**: - ```bash - # Check heading hierarchy - find docs/ -name "*.md" -exec grep -E '^#+' {} + | head -50 - - # Check for broken links - markdown-link-check docs/**/*.md - - # Check for missing alt text - grep -r '!\[.*\](' docs/ | grep -v '\[.*\]' || echo "✓ All images have alt text" - - # Spell check - aspell check docs/**/*.md - - # Build check - ./build-docs.sh 2>&1 | grep -i error || echo "✓ Build successful" - ``` - -2. **Manual checks**: - - Read each doc as target audience - - Follow tutorials - do they work? - - Try how-tos - do they solve problems? - - Check reference - is it complete? - - Read explanations - do they clarify? - -3. **Divio compliance check**: - - Is each doc correctly classified? - - Does it follow principles for its type? - - Is it solving the right problem for that type? - -4. **Accessibility check**: - - Proper heading hierarchy? - - All images have alt text? - - Clear language (not jargon-heavy)? - - Links are descriptive? - -5. **Peer review**: - - Have someone from target audience review - - Gather feedback on clarity, completeness, usability - - Revise based on feedback - -6. 
**Final build and deploy** (if applicable): - ```bash - # Build final documentation - ./build-docs.sh - - # Deploy to hosting (example for GitHub Pages) - # (Deployment steps depend on hosting platform) - ``` - -**Deliverables**: -- All automated checks passing -- Manual review completed with feedback addressed -- Divio compliance verified -- Accessibility compliance verified -- Final build successful -- Documentation deployed (if applicable) - ---- - -## Key Guidelines - -**For Agents**: -- Use Divio templates as starting point, not empty files -- Fill templates with real content, not more placeholders -- Test all code examples before committing -- Follow Divio principles strictly for each type -- Run generators for reference docs (don't write API docs manually) -- Validate quality at end (automated + manual checks) - -**For Users**: -- Implementation creates actual documentation, not just structure -- Templates provide guidance, you provide content -- Generators handle API reference, you write the rest -- Quality validation ensures documentation is actually useful -- Peer review from target audience is valuable - ---- - -## Common Pitfalls - -**DON'T**: -- Mix Divio types (tutorial that explains concepts, how-to that teaches basics) -- Skip testing code examples (broken examples break trust) -- Use only Western male names in examples -- Say "simply" or "just" or "obviously" (ableist language) -- Skip alt text for images (accessibility barrier) -- Write jargon-heavy prose (clarity issue) -- Commit before validating (quality issue) - -**DO**: -- Follow Divio principles for each type -- Test every code example -- Use diverse names in examples -- Use welcoming, clear language -- Add descriptive alt text -- Define technical terms -- Validate before considering complete - ---- - -## Commit Workflow - -**BEFORE moving to for_review**, you MUST commit your documentation: - -```bash -cd .worktrees/###-feature-WP##/ -git add docs/ -git commit -m "docs(WP##): " -``` - 
-**Example commit messages:** -- `docs(WP01): Add Divio structure and generator configs` -- `docs(WP02): Add getting started tutorial` -- `docs(WP05): Add API reference documentation` - -**Then move to review:** -```bash -spec-kitty agent tasks move-task WP## --to for_review --note "Ready for review: " -``` - -**Why this matters:** -- `move-task` validates that your worktree has commits beyond main -- Uncommitted changes will block the move to for_review -- This prevents lost work and ensures reviewers see complete documentation -- Dependent WPs will receive your work through the git merge-base - ---- - -## Status Tracking Note - -If `/spec-kitty.status` shows your WP in "doing" after you moved it to "for_review", don't panic - a reviewer may have moved it back (changes requested), or there's a sync delay. Focus on your WP. diff --git a/.kittify/missions/documentation/command-templates/plan.md b/.kittify/missions/documentation/command-templates/plan.md deleted file mode 100644 index a482f020ef..0000000000 --- a/.kittify/missions/documentation/command-templates/plan.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -description: Produce a documentation mission plan with audit/design guidance and generator setup. ---- - -# Command Template: /spec-kitty.plan (Documentation Mission) - -**Phases**: Audit (if gap-filling), Design -**Purpose**: Plan documentation structure, configure generators, prioritize gaps, design content outline. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Location Pre-flight Check - -Verify you are in the main repository (not a worktree). Planning happens in main for ALL missions. - -```bash -git branch --show-current # Should show "main" -``` - -**Note**: Planning in main is standard for all spec-kitty missions. Implementation happens in per-WP worktrees. 
- ---- - -## Planning Interrogation - -For documentation missions, planning interrogation is lighter than software-dev: -- **Simple projects** (single language, initial docs): 1-2 questions about structure preferences -- **Complex projects** (multiple languages, existing docs): 2-3 questions about integration approach - -**Key Planning Questions**: - -**Q1: Documentation Framework** -"Do you have a preferred documentation framework/generator?" -- Sphinx (Python ecosystem standard) -- MkDocs (Markdown-focused, simple) -- Docusaurus (React-based, modern) -- Jekyll (GitHub Pages native) -- None (plain Markdown) - -**Why it matters**: Determines build system, theming options, hosting compatibility. - -**Q2: Generator Integration Approach** (if multiple languages detected) -"How should API reference for different languages be organized?" -- Unified (all APIs in one reference section) -- Separated (language-specific reference sections) -- Parallel (side-by-side comparison) - -**Why it matters**: Affects directory structure, navigation design. - ---- - -## Outline - -1. **Setup**: Run `spec-kitty agent feature setup-plan --json` to initialize plan.md - -2. **Load context**: Read spec.md, meta.json (especially `documentation_state`) - -3. **Phase 0: Research** (if gap-filling mode) - - ### Gap Analysis (gap-filling mode only) - - **Objective**: Audit existing documentation and identify gaps. - - **Steps**: - 1. Scan existing `docs/` directory (or wherever docs live) - 2. Detect documentation framework (Sphinx, MkDocs, Jekyll, etc.) - 3. For each markdown file: - - Parse frontmatter for `type` field - - Apply content heuristics if no explicit type - - Classify as tutorial/how-to/reference/explanation or "unclassified" - 4. Build coverage matrix: - - Rows: Project areas/features - - Columns: Divio types (tutorial, how-to, reference, explanation) - - Cells: Documentation files (or empty if missing) - 5. Calculate coverage percentage - 6. 
Prioritize gaps: - - **High**: Missing tutorials (blocks new users) - - **High**: Missing reference for public APIs - - **Medium**: Missing how-tos for common tasks - - **Low**: Missing explanations (nice-to-have) - 7. Generate `gap-analysis.md` with: - - Current documentation inventory - - Coverage matrix (markdown table) - - Prioritized gap list - - Recommendations - - **Output**: `gap-analysis.md` file in feature directory - - --- - - ### Generator Research (all modes) - - **Objective**: Research generator configuration options for detected languages. - - **For Each Detected Language**: - - **JavaScript/TypeScript → JSDoc/TypeDoc**: - - Check if JSDoc installed: `npx jsdoc --version` - - Research config options: output format (HTML/Markdown), template (docdash, clean-jsdoc) - - Determine source directories to document - - Plan integration with manual docs - - **Python → Sphinx**: - - Check if Sphinx installed: `sphinx-build --version` - - Research extensions: autodoc (API from docstrings), napoleon (Google/NumPy style), viewcode (source links) - - Research theme: sphinx_rtd_theme (Read the Docs), alabaster (default), pydata-sphinx-theme - - Plan autodoc configuration (which modules to document) - - Plan integration with manual docs - - **Rust → rustdoc**: - - Check if Cargo installed: `cargo doc --help` - - Research rustdoc options: --no-deps, --document-private-items - - Plan Cargo.toml metadata configuration - - Plan integration with manual docs (rustdoc outputs HTML, may need linking) - - **Output**: research.md with generator findings and decisions - -4. 
**Phase 1: Design** - - ### Documentation Structure Design - - **Directory Layout**: - Design docs/ structure following Divio organization: - - ``` - docs/ - ├── index.md # Landing page - ├── tutorials/ # Learning-oriented - │ ├── getting-started.md - │ └── advanced-usage.md - ├── how-to/ # Problem-solving - │ ├── authentication.md - │ ├── deployment.md - │ └── troubleshooting.md - ├── reference/ # Technical specs - │ ├── api/ # Generated API docs - │ │ ├── python/ # Sphinx output - │ │ ├── javascript/ # JSDoc output - │ │ └── rust/ # rustdoc output - │ ├── cli.md # Manual CLI reference - │ └── configuration.md # Manual config reference - └── explanation/ # Understanding - ├── architecture.md - ├── concepts.md - └── design-decisions.md - ``` - - **Adapt based on**: - - Selected Divio types (only create directories for selected types) - - Project size (small projects may flatten structure) - - Existing docs (extend existing structure if gap-filling) - - --- - - ### Generator Configuration Design - - **For Each Generator**: - - **Sphinx (Python)**: - ```python - # docs/conf.py - project = '{project_name}' - author = '{author}' - extensions = [ - 'sphinx.ext.autodoc', # Generate from docstrings - 'sphinx.ext.napoleon', # Google/NumPy docstring support - 'sphinx.ext.viewcode', # Link to source - 'sphinx.ext.intersphinx', # Link to other projects - ] - html_theme = 'sphinx_rtd_theme' - autodoc_default_options = { - 'members': True, - 'undoc-members': False, - 'show-inheritance': True, - } - ``` - - **JSDoc (JavaScript)**: - ```json - { - "source": { - "include": ["src/"], - "includePattern": ".+\\.js$" - }, - "opts": { - "destination": "docs/reference/api/javascript", - "template": "node_modules/docdash", - "recurse": true - } - } - ``` - - **rustdoc (Rust)**: - ```toml - [package.metadata.docs.rs] - all-features = true - rustdoc-args = ["--document-private-items"] - ``` - - **Output**: Generator config snippets in plan.md, templates ready for implementation - - --- - - 
### Data Model - - Generate `data-model.md` with entities: - - **Documentation Mission**: Iteration state, selected types, configured generators - - **Divio Documentation Type**: Tutorial, How-To, Reference, Explanation with characteristics - - **Documentation Generator**: JSDoc, Sphinx, rustdoc configurations - - **Gap Analysis** (if applicable): Coverage matrix, prioritized gaps - - --- - - ### Work Breakdown - - Outline high-level work packages (detailed in `/spec-kitty.tasks`): - - **For Initial Mode**: - 1. WP01: Structure Setup - Create docs/ dirs, configure generators - 2. WP02: Tutorial Creation - Write selected tutorials - 3. WP03: How-To Creation - Write selected how-tos - 4. WP04: Reference Generation - Generate API docs, write manual reference - 5. WP05: Explanation Creation - Write selected explanations - 6. WP06: Quality Validation - Accessibility checks, link validation, build - - **For Gap-Filling Mode**: - 1. WP01: Gap Analysis Review - Review audit results with user - 2. WP02: High-Priority Gaps - Fill critical missing docs - 3. WP03: Medium-Priority Gaps - Fill important missing docs - 4. WP04: Generator Updates - Regenerate outdated API docs - 5. WP05: Quality Validation - Validate new and updated docs - - **For Feature-Specific Mode**: - 1. WP01: Feature Documentation - Document the specific feature across Divio types - 2. WP02: Integration - Integrate with existing documentation - 3. WP03: Quality Validation - Validate feature docs - - --- - - ### Quickstart - - Generate `quickstart.md` with: - - How to build documentation locally - - How to add new documentation (which template to use) - - How to regenerate API reference - - How to validate documentation quality - -5. 
**Report completion**: - - Plan file path - - Artifacts generated (research.md, data-model.md, gap-analysis.md, quickstart.md, release.md when publish is in scope) - - Next command: `/spec-kitty.tasks` - ---- - -## Key Guidelines - -**For Agents**: -- Run gap analysis only for gap-filling mode -- Auto-detect documentation framework from existing docs -- Configure generators based on detected languages -- Design structure following Divio principles -- Prioritize gaps by user impact (tutorials/reference high, explanations low) -- Plan includes both auto-generated and manual documentation - -**For Users**: -- Planning designs documentation structure, doesn't write content yet -- Generator configs enable automated API reference -- Gap analysis (if iterating) shows what needs attention -- Work breakdown will be detailed in `/spec-kitty.tasks` diff --git a/.kittify/missions/documentation/command-templates/review.md b/.kittify/missions/documentation/command-templates/review.md deleted file mode 100644 index ef60d00342..0000000000 --- a/.kittify/missions/documentation/command-templates/review.md +++ /dev/null @@ -1,344 +0,0 @@ ---- -description: Review documentation work packages for Divio compliance and quality. ---- - -# Command Template: /spec-kitty.review (Documentation Mission) - -**Phase**: Validate -**Purpose**: Review documentation for Divio compliance, accessibility, completeness, and quality. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). - -## Review Philosophy - -Documentation review is NOT code review: -- **Not about correctness** (code is about bugs) but **usability** (can readers accomplish their goals?) -- **Not about style** but **accessibility** (can everyone use these docs?) -- **Not about completeness** (covering every edge case) but **usefulness** (solving real problems) -- **Not pass/fail** but **continuous improvement** - ---- - -## Review Checklist - -### 1. 
Divio Type Compliance - -For each documentation file, verify it follows principles for its declared type: - -**Tutorial Review**: -- [ ] Learning-oriented (teaches by doing, not explaining)? -- [ ] Step-by-step progression with clear sequence? -- [ ] Each step shows immediate, visible result? -- [ ] Minimal explanations (links to explanation docs instead)? -- [ ] Assumes beginner level (no unexplained prerequisites)? -- [ ] Reliable (will work for all users following instructions)? -- [ ] Achieves concrete outcome (learner can do something new)? - -**How-To Review**: -- [ ] Goal-oriented (solves specific problem)? -- [ ] Assumes experienced user (not teaching basics)? -- [ ] Practical steps, minimal explanation? -- [ ] Flexible (readers can adapt to their situation)? -- [ ] Includes common variations? -- [ ] Links to reference for details, explanation for "why"? -- [ ] Title starts with "How to..."? - -**Reference Review**: -- [ ] Information-oriented (describes what exists)? -- [ ] Complete (all APIs/options/commands documented)? -- [ ] Consistent format (same structure for similar items)? -- [ ] Accurate (matches actual behavior)? -- [ ] Includes usage examples (not just descriptions)? -- [ ] Structured around code organization? -- [ ] Factual tone (no opinions or recommendations)? - -**Explanation Review**: -- [ ] Understanding-oriented (clarifies concepts)? -- [ ] Not instructional (not teaching how-to-do)? -- [ ] Discusses concepts, design decisions, trade-offs? -- [ ] Compares with alternatives fairly? -- [ ] Makes connections between ideas? -- [ ] Provides context and background? -- [ ] Identifies limitations and when (not) to use? - -**If type is wrong or mixed**: -- Return with feedback: "This is classified as {type} but reads like {actual_type}. Either reclassify or rewrite to match {type} principles." - ---- - -### 2. 
Accessibility Review - -**Heading Hierarchy**: -- [ ] One H1 per document (the title) -- [ ] H2s for major sections -- [ ] H3s for subsections under H2s -- [ ] No skipped levels (H1 → H3 is wrong) -- [ ] Headings are descriptive (not "Introduction", "Section 2") - -**Images**: -- [ ] All images have alt text -- [ ] Alt text describes what image shows (not "image" or "screenshot") -- [ ] Decorative images have empty alt text (`![]()`) -- [ ] Complex diagrams have longer descriptions - -**Language**: -- [ ] Clear, plain language (technical terms defined) -- [ ] Active voice ("run the command" not "the command should be run") -- [ ] Present tense ("returns" not "will return") -- [ ] Short sentences (15-20 words max) -- [ ] Short paragraphs (3-5 sentences) - -**Links**: -- [ ] Link text is descriptive ("see the installation guide" not "click here") -- [ ] Links are not bare URLs (use markdown links) -- [ ] No broken links (test all links) - -**Code Blocks**: -- [ ] All code blocks have language tags for syntax highlighting -- [ ] Expected output is shown (not just commands) -- [ ] Code examples actually work (tested) - -**Tables**: -- [ ] Tables have headers -- [ ] Headers use `|---|` syntax -- [ ] Tables are not too wide (wrap if needed) - -**Lists**: -- [ ] Proper markdown lists (not paragraphs with commas) -- [ ] Consistent bullet style -- [ ] Items are parallel in structure - -**If accessibility issues found**: -- Return with feedback listing specific issues and how to fix - ---- - -### 3. 
Inclusivity Review - -**Examples and Names**: -- [ ] Uses diverse names (not just Western male names) -- [ ] Names span different cultures and backgrounds -- [ ] Avoids stereotypical name choices - -**Language**: -- [ ] Gender-neutral ("they" not "he/she", "developers" not "guys") -- [ ] Avoids ableist language ("just", "simply", "obviously", "easy" imply reader inadequacy) -- [ ] Person-first language where appropriate ("person with disability" not "disabled person") -- [ ] Avoids idioms (cultural-specific phrases that don't translate) - -**Cultural Assumptions**: -- [ ] No religious references (Christmas, Ramadan, etc.) -- [ ] No cultural-specific examples (American holidays, sports, food) -- [ ] Date formats explained (ISO 8601 preferred) -- [ ] Currency and units specified (USD, meters, etc.) - -**Tone**: -- [ ] Welcoming to newcomers (not intimidating) -- [ ] Assumes good faith (users aren't "doing it wrong") -- [ ] Encouraging (celebrates progress) - -**If inclusivity issues found**: -- Return with feedback listing examples to change - ---- - -### 4. Completeness Review - -**For Initial Documentation**: -- [ ] All selected Divio types are present -- [ ] Tutorials enable new users to get started -- [ ] Reference covers all public APIs -- [ ] How-tos address common problems (from user research or support tickets) -- [ ] Explanations clarify key concepts and design - -**For Gap-Filling**: -- [ ] High-priority gaps from audit are filled -- [ ] Outdated docs are updated -- [ ] Coverage percentage improved - -**For Feature-Specific**: -- [ ] Feature is documented across relevant Divio types -- [ ] Feature docs integrate with existing documentation -- [ ] Feature is discoverable (linked from main index, relevant how-tos, etc.) - -**Common Gaps**: -- [ ] Installation/setup covered (tutorial or how-to)? -- [ ] Common tasks have how-tos? -- [ ] All public APIs in reference? -- [ ] Error messages explained (troubleshooting how-tos)? 
-- [ ] Architecture/design explained (explanation)? - -**If completeness gaps found**: -- Return with feedback listing missing documentation - ---- - -### 5. Quality Review - -**Tutorial Quality**: -- [ ] Tutorial actually works (reviewer followed it successfully)? -- [ ] Each step shows result (not "do X, Y, Z" without checkpoints)? -- [ ] Learner accomplishes something valuable? -- [ ] Appropriate for stated audience? - -**How-To Quality**: -- [ ] Solves the stated problem? -- [ ] Steps are clear and actionable? -- [ ] Reader can adapt to their situation? -- [ ] Links to reference for details? - -**Reference Quality**: -- [ ] Descriptions match actual behavior (not outdated)? -- [ ] Examples work (not broken or misleading)? -- [ ] Format is consistent across similar items? -- [ ] Search-friendly (clear headings, keywords)? - -**Explanation Quality**: -- [ ] Concepts are clarified (not more confusing)? -- [ ] Design rationale is clear? -- [ ] Alternatives are discussed fairly? -- [ ] Trade-offs are identified? - -**General Quality**: -- [ ] Documentation builds without errors -- [ ] No broken links (internal or external) -- [ ] No spelling errors -- [ ] Code examples work -- [ ] Images load correctly -- [ ] If `release.md` is present, it reflects the actual publish path and handoff steps - -**If quality issues found**: -- Return with feedback describing issues and how to improve - ---- - -## Review Process - -1. **Load work package**: - - Read WP prompt file (e.g., `tasks/WP02-tutorials.md`) - - Identify which documentation was created/updated - -2. **Review each document** against checklists above - -3. **Build documentation** and verify: - ```bash - ./build-docs.sh - ``` - - Check for build errors/warnings - - Navigate to docs in browser - - Test links, images, navigation - -4. **Test tutorials** (if present): - - Follow tutorial steps exactly - - Verify each step works - - Confirm outcome is achieved - -5. 
**Test how-tos** (if present): - - Attempt to solve the problem using the guide - - Verify solution works - -6. **Validate generated reference** (if present): - - Check auto-generated API docs - - Verify all public APIs present - - Check descriptions are clear - -7. **Decide**: - - **If all checks pass**: - - Move WP to "done" lane - - Update activity log with approval - - Proceed to next WP - - **If issues found**: - - Populate Review Feedback section in WP prompt - - List specific issues with locations and fix guidance - - Set `review_status: has_feedback` - - Move WP back to "planned" or "doing" - - Notify implementer - ---- - -## Review Feedback Format - -When returning work for changes, use this format: - -```markdown -## Review Feedback - -### Divio Type Compliance - -**Issue**: docs/tutorials/getting-started.md is classified as tutorial but reads like how-to (assumes too much prior knowledge). - -**Fix**: Either: -- Reclassify as how-to (change frontmatter `type: how-to`) -- Rewrite to be learning-oriented for beginners (add prerequisites section, simplify steps, show results at each step) - -### Accessibility - -**Issue**: docs/tutorials/getting-started.md has image without alt text (line 45). - -**Fix**: Add alt text describing what the image shows: -```markdown -![Screenshot showing the welcome screen after successful login](images/welcome.png) -``` - -### Inclusivity - -**Issue**: docs/how-to/authentication.md uses only male names in examples ("Bob", "John", "Steve"). - -**Fix**: Use diverse names: "Aisha", "Yuki", "Carlos", "Alex". - -### Completeness - -**Issue**: Public API `DocumentGenerator.configure()` is not documented in reference. - -**Fix**: Add entry to docs/reference/api.md or regenerate API docs if using auto-generation. - -### Quality - -**Issue**: Tutorial step 3 command fails (missing required --flag option). 
- -**Fix**: Add --flag to command on line 67: -```bash -command --flag --other-option value -``` -``` - ---- - -## Key Guidelines - -**For Reviewers**: -- Focus on usability and accessibility, not perfection -- Provide specific, actionable feedback with line numbers -- Explain why something is an issue (educate, don't just reject) -- Test tutorials and how-tos by actually following them -- Check Divio type compliance carefully (most common issue) - -**For Implementers**: -- Review feedback is guidance, not criticism -- Address all feedback items before re-submitting -- Mark `review_status: acknowledged` when you understand feedback -- Update activity log as you address each item - ---- - -## Success Criteria - -Documentation is ready for "done" when: -- [ ] All Divio type principles followed -- [ ] All accessibility checks pass -- [ ] All inclusivity checks pass -- [ ] All completeness requirements met -- [ ] All quality validations pass -- [ ] Documentation builds successfully -- [ ] Tutorials work when followed -- [ ] How-tos solve stated problems -- [ ] Reference is complete and accurate -- [ ] Explanations clarify concepts diff --git a/.kittify/missions/documentation/command-templates/specify.md b/.kittify/missions/documentation/command-templates/specify.md deleted file mode 100644 index 747b8c7545..0000000000 --- a/.kittify/missions/documentation/command-templates/specify.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -description: Create a documentation-focused feature specification with discovery and Divio scoping. ---- - -# Command Template: /spec-kitty.specify (Documentation Mission) - -**Phase**: Discover -**Purpose**: Understand documentation needs, identify iteration mode, select Divio types, detect languages, recommend generators. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). 
- -## Discovery Gate (mandatory) - -Before running any scripts or writing to disk, conduct a structured discovery interview tailored to documentation missions. - -**Scope proportionality**: For documentation missions, discovery depth depends on project maturity: -- **New project** (initial mode): 3-4 questions about audience, goals, Divio types -- **Existing docs** (gap-filling mode): 2-3 questions about gaps, priorities, maintenance -- **Feature-specific** (documenting new feature): 1-2 questions about feature scope, integration - -### Discovery Questions - -**Question 1: Iteration Mode** (CRITICAL) - -Ask user which documentation scenario applies: - -**(A) Initial Documentation** - First-time documentation for a project (no existing docs) -**(B) Gap-Filling** - Improving/extending existing documentation -**(C) Feature-Specific** - Documenting a specific new feature/module - -**Why it matters**: Determines whether to run gap analysis, how to structure workflow. - -**Store answer in**: `meta.json → documentation_state.iteration_mode` - ---- - -**Question 2A: For Initial Mode - What to Document** - -Ask user: -- What is the primary audience? (developers, end users, contributors, operators) -- What are the documentation goals? (onboarding, API reference, troubleshooting, understanding architecture) -- Which Divio types are most important? (tutorial, how-to, reference, explanation) - -**Why it matters**: Determines which templates to generate, what content to prioritize. - ---- - -**Question 2B: For Gap-Filling Mode - What's Missing** - -Inform user you will audit existing documentation, then ask: -- What problems are users reporting? (can't get started, can't solve specific problems, APIs undocumented, don't understand concepts) -- Which areas need documentation most urgently? (specific features, concepts, tasks) -- What Divio types are you willing to add? 
(tutorial, how-to, reference, explanation) - -**Why it matters**: Focuses gap analysis on user-reported issues, prioritizes work. - ---- - -**Question 2C: For Feature-Specific Mode - Feature Details** - -Ask user: -- Which feature/module are you documenting? -- Who will use this feature? (what audience) -- What aspects need documentation? (getting started, common tasks, API details, architecture/design) - -**Why it matters**: Scopes documentation to just the feature, determines which Divio types apply. - ---- - -**Question 3: Language Detection & Generators** - -Auto-detect project languages: -- Scan for `.js`, `.ts`, `.jsx`, `.tsx` files → Recommend JSDoc/TypeDoc -- Scan for `.py` files → Recommend Sphinx -- Scan for `Cargo.toml`, `.rs` files → Recommend rustdoc - -Present to user: -"Detected languages: [list]. Recommend these generators: [list]. Proceed with these?" - -Allow user to: -- Confirm all -- Select subset -- Skip generators (manual documentation only) - -**Why it matters**: Determines which generators to configure in planning phase. - -**Store answer in**: `meta.json → documentation_state.generators_configured` - ---- - -**Question 4: Target Audience (if not already clear)** - -If not clear from earlier answers, ask: -"Who is the primary audience for this documentation?" -- Developers integrating your library/API -- End users using your application -- Contributors to your project -- Operators deploying/maintaining your system -- Mix of above (specify) - -**Why it matters**: Affects documentation tone, depth, assumed knowledge. - -**Store answer in**: `spec.md → ## Documentation Scope → Target Audience` - ---- - -**Question 5: Publish Scope (optional)** - -Ask user: -- Is documentation release/publish in scope for this effort? -- If yes, should we produce `release.md` with hosting and handoff details? - -**Why it matters**: Avoids unnecessary release work when publishing is handled elsewhere. 
- ---- - -### Intent Summary - -After discovery questions answered, synthesize into Intent Summary: - -```markdown -## Documentation Mission Intent - -**Iteration Mode**: [initial | gap-filling | feature-specific] -**Primary Goal**: [Describe what user wants to accomplish] -**Target Audience**: [Who will read these docs] -**Selected Divio Types**: [tutorial, how-to, reference, explanation] -**Detected Languages**: [Python, JavaScript, Rust, etc.] -**Recommended Generators**: [JSDoc, Sphinx, rustdoc] - -**Scope**: [Summary of what will be documented] -``` - -Confirm with user before proceeding. - ---- - -## Outline - -1. **Check discovery status**: If questions unanswered, ask one at a time (Discovery Gate above) - -2. **Generate feature directory**: Run `spec-kitty agent feature create-feature "doc-{project-name}" --json --mission documentation` - - Feature naming convention: `doc-{project-name}` or `docs-{feature-name}` for feature-specific - -3. **Create meta.json**: Include `mission: "documentation"` and `documentation_state` field: - ```json - { - "feature_number": "###", - "slug": "doc-project-name", - "friendly_name": "Documentation: Project Name", - "mission": "documentation", - "source_description": "...", - "created_at": "...", - "documentation_state": { - "iteration_mode": "initial", - "divio_types_selected": ["tutorial", "reference"], - "generators_configured": [ - {"name": "sphinx", "language": "python"} - ], - "target_audience": "developers", - "last_audit_date": null, - "coverage_percentage": 0.0 - } - } - ``` - -4. **Run gap analysis** (gap-filling mode only): - - Scan existing `docs/` directory - - Classify docs into Divio types - - Build coverage matrix - - Generate `gap-analysis.md` with findings - -5. 
**Generate specification**: - - Use `templates/spec-template.md` from documentation mission - - Fill in Documentation Scope section with discovery answers - - Include gap analysis results if gap-filling mode - - Define requirements based on selected Divio types and generators - - Define success criteria (accessibility, completeness, audience satisfaction) - -6. **Validate specification**: Run quality checks (see spec-template.md checklist) - -7. **Report completion**: Spec file path, next command (`/spec-kitty.plan`) - ---- - -## Key Guidelines - -**For Agents**: -- Ask discovery questions one at a time (don't overwhelm user) -- Auto-detect languages to recommend generators -- For gap-filling, show audit results to user before asking what to fill -- Store iteration state in meta.json (enables future iterations) -- Emphasize Divio types in specification (tutorial/how-to/reference/explanation) -- Link to Write the Docs and Divio resources in spec - -**For Users**: -- Discovery helps ensure documentation meets real needs -- Gap analysis (if iterating) shows what's missing -- Generator recommendations save manual API documentation work -- Iteration mode affects workflow (initial vs gap-filling vs feature-specific) diff --git a/.kittify/missions/documentation/command-templates/tasks.md b/.kittify/missions/documentation/command-templates/tasks.md deleted file mode 100644 index bd4c842803..0000000000 --- a/.kittify/missions/documentation/command-templates/tasks.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -description: Generate documentation work packages and subtasks aligned to Divio types. ---- - -# Command Template: /spec-kitty.tasks (Documentation Mission) - -**Phase**: Design (finalizing work breakdown) -**Purpose**: Break documentation work into independently implementable work packages with subtasks. - -## User Input - -```text -$ARGUMENTS -``` - -You **MUST** consider the user input before proceeding (if not empty). 
- -## Location Pre-flight Check - -Verify you are in the main repository (not a worktree). Task generation happens in main for ALL missions. - -```bash -git branch --show-current # Should show "main" -``` - -**Note**: Task generation in main is standard for all spec-kitty missions. Implementation happens in per-WP worktrees. - ---- - -## Outline - -1. **Setup**: Run `spec-kitty agent feature check-prerequisites --json --paths-only --include-tasks` - -2. **Load design documents**: - - spec.md (documentation goals, selected Divio types) - - plan.md (structure design, generator configs) - - gap-analysis.md (if gap-filling mode) - - meta.json (iteration_mode, generators_configured) - -3. **Derive fine-grained subtasks**: - - ### Subtask Patterns for Documentation - - **Structure Setup** (all modes): - - T001: Create `docs/` directory structure - - T002: Create index.md landing page - - T003: [P] Configure Sphinx (if Python detected) - - T004: [P] Configure JSDoc (if JavaScript detected) - - T005: [P] Configure rustdoc (if Rust detected) - - T006: Set up build script (Makefile or build.sh) - - **Tutorial Creation** (if tutorial selected): - - T010: Write "Getting Started" tutorial - - T011: Write "Basic Usage" tutorial - - T012: [P] Write "Advanced Topics" tutorial - - T013: Add screenshots/examples to tutorials - - T014: Test tutorials with fresh user - - **How-To Creation** (if how-to selected): - - T020: Write "How to Deploy" guide - - T021: Write "How to Configure" guide - - T022: Write "How to Troubleshoot" guide - - T023: [P] Write additional task-specific guides - - **Reference Generation** (if reference selected): - - T030: Generate Python API reference (Sphinx autodoc) - - T031: Generate JavaScript API reference (JSDoc) - - T032: Generate Rust API reference (cargo doc) - - T033: Write CLI reference (manual) - - T034: Write configuration reference (manual) - - T035: Integrate generated + manual reference - - T036: Validate all public APIs documented - - 
**Explanation Creation** (if explanation selected): - - T040: Write "Architecture Overview" explanation - - T041: Write "Core Concepts" explanation - - T042: Write "Design Decisions" explanation - - T043: [P] Add diagrams illustrating concepts - - **Quality Validation** (all modes): - - T050: Validate heading hierarchy - - T051: Validate all images have alt text - - T052: Check for broken internal links - - T053: Check for broken external links - - T054: Verify code examples work - - T055: Check bias-free language - - T056: Build documentation site - - T057: Deploy to hosting (if applicable) - -4. **Roll subtasks into work packages**: - - ### Work Package Patterns - - **For Initial Mode**: - - WP01: Structure & Generator Setup (T001-T006) - - WP02: Tutorial Documentation (T010-T014) - If tutorials selected - - WP03: How-To Documentation (T020-T023) - If how-tos selected - - WP04: Reference Documentation (T030-T036) - If reference selected - - WP05: Explanation Documentation (T040-T043) - If explanation selected - - WP06: Quality Validation (T050-T057) - - **For Gap-Filling Mode**: - - WP01: High-Priority Gaps (tasks for critical missing docs from gap analysis) - - WP02: Medium-Priority Gaps (tasks for important missing docs) - - WP03: Generator Updates (regenerate outdated API docs) - - WP04: Quality Validation (validate all docs, old and new) - - **For Feature-Specific Mode**: - - WP01: Feature Documentation (tasks for documenting the feature across selected Divio types) - - WP02: Integration (tasks for integrating feature docs with existing docs) - - WP03: Quality Validation (validate feature-specific docs) - - ### Prioritization - - - **P0 (foundation)**: Structure setup, generator configuration - - **P1 (critical)**: Tutorials (if new users), Reference (if API docs missing) - - **P2 (important)**: How-Tos (solve known problems), Explanation (understanding) - - **P3 (polish)**: Quality validation, accessibility improvements - -5. 
**Write `tasks.md`**: - - Use `templates/tasks-template.md` from documentation mission - - Include work packages with subtasks - - Mark parallel opportunities (`[P]`) - - Define dependencies (WP01 must complete before others) - - Identify MVP scope (typically WP01 + Reference generation) - -6. **Generate prompt files**: - - Create flat `FEATURE_DIR/tasks/` directory (no subdirectories!) - - For each work package: - - Generate `WPxx-slug.md` using `templates/task-prompt-template.md` - - Include objectives, context, subtask guidance - - Add quality validation strategy (documentation-specific) - - Include Divio compliance checks - - Add accessibility/inclusivity checklists - - Set `lane: "planned"` in frontmatter - -7. **Report**: - - Path to tasks.md - - Work package count and subtask tallies - - Parallelization opportunities - - MVP recommendation - - Next command: `/spec-kitty.implement WP01` (or review tasks.md first) - ---- - -## Documentation-Specific Task Generation Rules - -**Generator Subtasks**: -- Mark generators as `[P]` (parallel) - different languages can generate simultaneously -- Include tool check subtasks (verify sphinx-build, npx, cargo available) -- Include config generation subtasks (create conf.py, jsdoc.json) -- Include actual generation subtasks (run the generator) -- Include integration subtasks (link generated docs into manual structure) - -**Content Authoring Subtasks**: -- One subtask per document (don't bundle "write all tutorials" into one task) -- Mark independent docs as `[P]` (parallel) - different docs can be written simultaneously -- Include validation subtasks (test tutorials, verify how-tos solve problems) - -**Quality Validation Subtasks**: -- Mark validation checks as `[P]` (parallel) - different checks can run simultaneously -- Include automated checks (link checker, spell check, build) -- Include manual checks (accessibility review, Divio compliance) - -**Work Package Scope**: -- Each Divio type typically gets its own work 
package (WP for tutorials, WP for how-tos, etc.) -- Exception: Small projects may combine types if only 1-2 docs per type -- Generator setup is always separate (WP01 foundation) -- Quality validation is always separate (final WP) - ---- - -## Key Guidelines - -**For Agents**: -- Adapt work packages to iteration mode -- For gap-filling, work packages target specific gaps from audit -- Mark generator invocations as parallel (different languages) -- Mark independent docs as parallel (different files) -- Include Divio compliance in Definition of Done for each WP -- Quality validation is final work package (depends on all others) -- If publish is in scope, add a release WP to produce `release.md` - -**For Users**: -- Tasks.md shows the full work breakdown -- Work packages are independently implementable -- MVP often just structure + reference (API docs) -- Full documentation includes all Divio types -- Parallel work packages can be implemented simultaneously diff --git a/.kittify/missions/documentation/mission.yaml b/.kittify/missions/documentation/mission.yaml deleted file mode 100644 index e3cac4c8bc..0000000000 --- a/.kittify/missions/documentation/mission.yaml +++ /dev/null @@ -1,115 +0,0 @@ -name: "Documentation Kitty" -description: "Create and maintain high-quality software documentation following Write the Docs and Divio principles" -version: "1.0.0" -domain: "other" - -# Workflow customization -workflow: - phases: - - name: "discover" - description: "Identify documentation needs and target audience" - - name: "audit" - description: "Analyze existing documentation and identify gaps" - - name: "design" - description: "Plan documentation structure and Divio types" - - name: "generate" - description: "Create documentation from templates and generators" - - name: "validate" - description: "Check quality, accessibility, and completeness" - - name: "publish" - description: "Deploy documentation and notify stakeholders" - -# Expected artifacts -artifacts: - required: - 
- spec.md - - plan.md - - tasks.md - - gap-analysis.md - optional: - - divio-templates/ - - generator-configs/ - - audit-report.md - - research.md - - data-model.md - - quickstart.md - - release.md - -# Path conventions for this mission -paths: - workspace: "docs/" - deliverables: "docs/output/" - documentation: "docs/" - -# Validation rules -validation: - checks: - - all_divio_types_valid - - no_conflicting_generators - - templates_populated - - gap_analysis_complete - - documentation_state_exists - - audit_recency - custom_validators: false # No custom validators.py initially - -# MCP tools recommended for this mission -mcp_tools: - required: - - filesystem - - git - recommended: - - web-search - - code-search - optional: - - github - - gitlab - -# Agent personality/instructions -agent_context: | - You are a documentation agent following Write the Docs best practices and the Divio documentation system. - - Key Practices: - - Documentation as code: docs live in version control alongside source - - Divio 4-type system: tutorial, how-to, reference, explanation (distinct purposes) - - Accessibility: clear language, proper headings, alt text for images - - Bias-free language: inclusive examples and terminology - - Iterative improvement: support gap-filling and feature-specific documentation - - Workflow Phases: discover → audit → design → generate → validate → publish - - Generator Integration: - - JSDoc for JavaScript/TypeScript API reference - - Sphinx for Python API reference (autodoc + napoleon) - - rustdoc for Rust API reference - - Gap Analysis: - - Audit existing docs to identify missing Divio types - - Build coverage matrix showing what exists vs what's needed - - Prioritize gaps by user impact - -# Task metadata fields -task_metadata: - required: - - task_id - - lane - - phase - - agent - optional: - - shell_pid - - assignee - - estimated_hours - -# Command customization -commands: - specify: - prompt: "Define documentation needs: iteration mode 
(initial/gap-filling/feature-specific), Divio types to include, target audience, and documentation goals" - plan: - prompt: "Design documentation structure, configure generators (JSDoc/Sphinx/rustdoc), plan gap-filling strategy if iterating" - tasks: - prompt: "Break documentation work into packages: template creation, generator setup, content authoring, quality validation" - implement: - prompt: "Generate documentation from templates, invoke generators for reference docs, populate templates with project-specific content" - review: - prompt: "Validate Divio type adherence, check accessibility guidelines, verify generator output quality, assess completeness" - accept: - prompt: "Validate documentation completeness, quality gates, and readiness for publication" diff --git a/.kittify/missions/documentation/templates/divio/explanation-template.md b/.kittify/missions/documentation/templates/divio/explanation-template.md deleted file mode 100644 index 6c325438d6..0000000000 --- a/.kittify/missions/documentation/templates/divio/explanation-template.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -type: explanation -divio_category: understanding-oriented -target_audience: curious-users -purpose: conceptual-clarification -outcome: user-understands-why-and-how ---- - -# Explanation: {CONCEPT_OR_TOPIC} - -> **Divio Type**: Explanation (Understanding-Oriented) -> **Target Audience**: Users who want to understand concepts and context -> **Purpose**: Clarify and illuminate to deepen understanding -> **Outcome**: User understands why things are the way they are - -## Overview - -{Introduce the concept/topic and why understanding it matters} - -This explanation covers: -- {Key aspect 1 that will be discussed} -- {Key aspect 2 that will be discussed} -- {Key aspect 3 that will be discussed} - -## Background and Context - -{Provide historical context or background that helps frame the discussion} - -**Why this matters**: {Explain relevance to users} - -{Add diagram if it helps explain the 
concept} - -![{Descriptive alt text}]({path-to-diagram}) - -## Core Concepts - -### {Concept 1} - -{Explain the concept in depth} - -**Analogy**: {Use an analogy if it helps understanding} - -**Why it works this way**: {Explain the reasoning} - -**Implications**: -- {What this means for users} -- {How this affects behavior} -- {What to keep in mind} - -### {Concept 2} - -{Explain the next concept} - -**Connection to {Concept 1}**: {How concepts relate} - -### {Concept 3} - -{Continue explaining key concepts} - -## How It Works - -{Explain the mechanism or process in detail} - -**Step-by-step explanation**: - -1. **{Phase 1}**: {What happens and why} -2. **{Phase 2}**: {What happens next and why} -3. **{Phase 3}**: {Continue the explanation} - -{Use diagrams, flowcharts, or illustrations to clarify} - -## Design Decisions - -### Why {Decision/Approach} Was Chosen - -**The problem**: {What needed to be solved} - -**Considered alternatives**: -- **Option A**: {Description} - - Pros: {Benefits} - - Cons: {Drawbacks} -- **Option B**: {Description} - - Pros: {Benefits} - - Cons: {Drawbacks} -- **Chosen: Option C**: {Description} - - Why this was chosen: {Reasoning} - - Trade-offs accepted: {What was sacrificed for the benefits} - -## Common Misconceptions - -### Misconception: "{Common wrong belief}" - -**Reality**: {What's actually true} - -**Why the confusion**: {Why people think this} - -**Clarification**: {Detailed explanation of the truth} - -### Misconception: "{Another common wrong belief}" - -**Reality**: {What's actually true} - -**Example to illustrate**: {Concrete example that clarifies} - -## Relationships and Connections - -### Connection to {Related Concept} - -{Explain how this concept relates to another} - -**Differences**: -- {Key difference 1} -- {Key difference 2} - -**Similarities**: -- {Key similarity 1} -- {Key similarity 2} - -### Impact on {Related System/Feature} - -{Explain how this concept affects other parts of the system} - -## Trade-offs and 
Limitations - -**Benefits of this approach**: -- {Benefit 1} -- {Benefit 2} -- {Benefit 3} - -**Limitations**: -- {Limitation 1 and why it exists} -- {Limitation 2 and why it exists} - -**When this might not be ideal**: {Scenarios where trade-offs are problematic} - -## Practical Implications - -### For {User Type 1} - -{What this concept means for this type of user} - -**Key takeaways**: -- {Actionable insight 1} -- {Actionable insight 2} - -### For {User Type 2} - -{What this concept means for this type of user} - -## Further Reading - -- **Tutorial**: Learn by doing with [Tutorial: {Topic}](../tutorials/{link}) -- **How-To**: Apply this in practice with [How-To: {Task}](../how-to/{link}) -- **Reference**: Technical details in [{Reference}](../reference/{link}) -- **External**: [Article/Book about {Topic}]({external-link}) - ---- - -## Write the Docs Best Practices (Remove this section before publishing) - -**Explanation Principles**: -- ✅ Understanding-oriented: Clarify and illuminate -- ✅ Discuss concepts, not tasks (not instructional) -- ✅ Provide context and background -- ✅ Explain "why" things are the way they are -- ✅ Discuss alternatives and trade-offs -- ✅ Make connections between ideas -- ✅ Can be more free-form than other types -- ✅ No imperative mood (not "do this") - -**Accessibility**: -- ✅ Proper heading hierarchy -- ✅ Alt text for diagrams (especially important for conceptual diagrams) -- ✅ Clear language, define technical terms -- ✅ Use diagrams to clarify complex concepts -- ✅ Descriptive link text - -**Inclusivity**: -- ✅ Diverse examples -- ✅ Gender-neutral language -- ✅ No cultural assumptions -- ✅ Consider different learning styles (visual, verbal, etc.) 
- -**Explanation-Specific Guidelines**: -- Start with "why" before "what" -- Use analogies and metaphors to clarify -- Diagrams are very valuable for explanations -- Discuss design decisions and trade-offs -- Address common misconceptions -- Make connections to related concepts -- Don't just describe - explain and clarify -- Be conversational but accurate diff --git a/.kittify/missions/documentation/templates/divio/howto-template.md b/.kittify/missions/documentation/templates/divio/howto-template.md deleted file mode 100644 index 39db783bdd..0000000000 --- a/.kittify/missions/documentation/templates/divio/howto-template.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -type: how-to -divio_category: goal-oriented -target_audience: experienced-users -purpose: problem-solving-guide -outcome: user-solves-specific-problem ---- - -# How-To: {TASK_TO_ACCOMPLISH} - -> **Divio Type**: How-To Guide (Goal-Oriented) -> **Target Audience**: Users with basic familiarity who need to solve a specific problem -> **Purpose**: Provide practical steps to accomplish a specific goal -> **Outcome**: User successfully completes the task - -## Goal - -This guide shows you how to {accomplish specific task}. - -**Use this guide when you need to**: {Describe the problem/goal this solves} - -## Prerequisites - -- {Required knowledge - assume user has basic familiarity} -- {Required setup or configuration} -- {Required tools or access} - -## Quick Summary - -If you're experienced, here's the short version: - -```bash -# Step 1: {Brief description} -{command} - -# Step 2: {Brief description} -{command} - -# Step 3: {Brief description} -{command} -``` - -## Detailed Steps - -### 1. 
{First Major Step} - -{Brief context for why this step is needed} - -```{language} -{code-or-command} -``` - -**Options**: -- `--option1`: {When to use this} -- `--option2`: {When to use this} - -**Common variations**: -- **If you need {variation}**: Use `{alternative command}` -- **If you're using {different setup}**: Modify the command to `{modified version}` - -### 2. {Second Major Step} - -{Brief context} - -```{language} -{code-or-command} -``` - -**Important**: {Critical thing to watch out for} - -### 3. {Third Major Step} - -{Brief context} - -```{language} -{code-or-command} -``` - -### 4. {Continue as needed...} - -## Verification - -To confirm it worked: - -```bash -{command-to-verify} -``` - -You should see: -``` -{expected-output} -``` - -## Troubleshooting - -### Issue: {Common problem} - -**Symptoms**: -- {What user sees} -- {Error message or behavior} - -**Cause**: {Why this happens} - -**Solution**: -```bash -{fix-command} -``` - -### Issue: {Another common problem} - -**Symptoms**: {What user sees} - -**Solution**: {Steps to fix} - -## Alternative Approaches - -**Method 1** (described above): Best when {scenario} - -**Method 2**: If you need {different requirement}, use this instead: -```bash -{alternative-approach} -``` - -**Method 3**: For {specific use case}: -```bash -{another-approach} -``` - -## Related Resources - -- **Tutorial**: New to this? 
Start with [Tutorial: {Topic}](../tutorials/{link}) -- **Reference**: See [{API/CLI Reference}](../reference/{link}) for all options -- **Explanation**: Understand why this works in [Explanation: {Topic}](../explanation/{link}) - ---- - -## Write the Docs Best Practices (Remove this section before publishing) - -**How-To Principles**: -- ✅ Goal-oriented: Solve a specific problem -- ✅ Assume reader has basic knowledge (not for beginners) -- ✅ Focus on practical steps, minimal theory -- ✅ Flexible: Reader can adapt to their situation -- ✅ Provide options and alternatives for different scenarios -- ✅ Include troubleshooting for common issues -- ✅ Link to Reference for details, Explanation for "why" -- ✅ Use imperative mood ("Do this", not "You might want to") - -**Accessibility**: -- ✅ Proper heading hierarchy -- ✅ Alt text for all images -- ✅ Clear, plain language -- ✅ Syntax highlighting for code blocks -- ✅ Descriptive link text - -**Inclusivity**: -- ✅ Diverse examples -- ✅ Gender-neutral language -- ✅ No cultural assumptions - -**How-To Specific Guidelines**: -- Start with the goal (what will be accomplished) -- Provide quick summary for experienced users -- Offer options and variations (real-world scenarios vary) -- Include verification step (how to know it worked) -- Troubleshoot common problems -- Don't explain concepts - link to Explanations -- Assume familiarity - not a tutorial diff --git a/.kittify/missions/documentation/templates/divio/reference-template.md b/.kittify/missions/documentation/templates/divio/reference-template.md deleted file mode 100644 index f49fbaac03..0000000000 --- a/.kittify/missions/documentation/templates/divio/reference-template.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -type: reference -divio_category: information-oriented -target_audience: all-users -purpose: technical-description -outcome: user-knows-what-exists ---- - -# Reference: {API/CLI/CONFIG_NAME} - -> **Divio Type**: Reference (Information-Oriented) -> **Target Audience**: 
All users looking up technical details -> **Purpose**: Provide accurate, complete technical description -> **Outcome**: User finds the information they need - -## Overview - -{Brief description of what this reference documents} - -**Quick Navigation**: -- [{Section 1}](#{section-anchor}) -- [{Section 2}](#{section-anchor}) -- [{Section 3}](#{section-anchor}) - -## {API Class/CLI Command/Config Section} - -### Syntax - -```{language} -{canonical-syntax} -``` - -### Description - -{Neutral, factual description of what this does} - -### Parameters - -| Parameter | Type | Required | Default | Description | -|-----------|------|----------|---------|-------------| -| `{param1}` | `{type}` | Yes | - | {Description} | -| `{param2}` | `{type}` | No | `{default}` | {Description} | -| `{param3}` | `{type}` | No | `{default}` | {Description} | - -### Return Value - -**Type**: `{return-type}` - -**Description**: {What is returned} - -**Possible values**: -- `{value1}`: {When this is returned} -- `{value2}`: {When this is returned} - -### Exceptions / Errors - -| Error | Condition | Resolution | -|-------|-----------|------------| -| `{ErrorType}` | {When it occurs} | {How to handle} | -| `{ErrorType}` | {When it occurs} | {How to handle} | - -### Examples - -**Basic usage**: -```{language} -{basic-example} -``` - -**With options**: -```{language} -{example-with-options} -``` - -**Advanced usage**: -```{language} -{advanced-example} -``` - -### Notes - -- {Important implementation detail} -- {Edge case or limitation} -- {Performance consideration} - -### See Also - -- [{Related API/command}](#{anchor}) -- [{Related concept}](../explanation/{link}) - ---- - -## {Next API Class/CLI Command/Config Section} - -{Repeat the structure above for each item being documented} - ---- - -## Constants / Enumerations - -### `{ConstantName}` - -**Type**: `{type}` -**Value**: `{value}` -**Description**: {What it represents} - -**Usage**: -```{language} -{usage-example} -``` - ---- - -## Type 
Definitions - -### `{TypeName}` - -```{language} -{type-definition} -``` - -**Properties**: - -| Property | Type | Description | -|----------|------|-------------| -| `{prop1}` | `{type}` | {Description} | -| `{prop2}` | `{type}` | {Description} | - -**Example**: -```{language} -{example-usage} -``` - ---- - -## Version History - -### Version {X.Y.Z} -- Added: `{new-feature}` -- Changed: `{modified-behavior}` -- Deprecated: `{old-feature}` (use `{new-feature}` instead) -- Removed: `{removed-feature}` - -### Version {X.Y.Z} -- {Changes in this version} - ---- - -## Write the Docs Best Practices (Remove this section before publishing) - -**Reference Principles**: -- ✅ Information-oriented: Describe facts accurately -- ✅ Structure around code organization (classes, modules, commands) -- ✅ Consistent format for all similar items -- ✅ Complete and accurate -- ✅ Neutral tone (no opinions or recommendations) -- ✅ Include examples for every item -- ✅ Do not explain how to use (that's How-To) or why (that's Explanation) - -**Accessibility**: -- ✅ Proper heading hierarchy -- ✅ Alt text for diagrams/screenshots -- ✅ Tables for structured data -- ✅ Syntax highlighting for code -- ✅ Descriptive link text - -**Inclusivity**: -- ✅ Diverse example names -- ✅ Gender-neutral language -- ✅ No cultural assumptions - -**Reference-Specific Guidelines**: -- Alphabetical or logical ordering -- Every public API/command documented -- Parameters/options in consistent format (tables work well) -- Examples for typical usage -- Don't bury the lead - most important info first -- Link to related reference items -- Version history for deprecations/changes -- Autogenerate from code when possible (JSDoc, Sphinx, rustdoc) diff --git a/.kittify/missions/documentation/templates/divio/tutorial-template.md b/.kittify/missions/documentation/templates/divio/tutorial-template.md deleted file mode 100644 index bf3e562594..0000000000 --- a/.kittify/missions/documentation/templates/divio/tutorial-template.md 
+++ /dev/null @@ -1,146 +0,0 @@ ---- -type: tutorial -divio_category: learning-oriented -target_audience: beginners -purpose: hands-on-lesson -outcome: learner-can-do-something ---- - -# Tutorial: {TUTORIAL_TITLE} - -> **Divio Type**: Tutorial (Learning-Oriented) -> **Target Audience**: Beginners with little to no prior experience -> **Purpose**: Guide learners through completing a meaningful task step-by-step -> **Outcome**: By the end, learners will have accomplished something concrete - -## What You'll Learn - -In this tutorial, you will: -- {Bullet point: First thing they'll accomplish} -- {Bullet point: Second thing they'll accomplish} -- {Bullet point: Third thing they'll accomplish} - -**Time to Complete**: Approximately {X} minutes - -## Before You Begin - -**Prerequisites**: -- {Required knowledge or skill - keep minimal for beginners} -- {Required tool or software with installation link} -- {Required account or access} - -**What You'll Need**: -- {Physical or digital resources needed} -- {Optional: Link to starter code or files} - -## Step 1: {First Step Title} - -{Brief introduction to what this step accomplishes} - -**Do this**: - -```bash -# Exact command to run -{command-here} -``` - -**You should see**: - -``` -{Expected output} -``` - -✅ **Checkpoint**: {How learner knows this step succeeded} - -> **💡 Learning Note**: {Brief explanation of what just happened - keep it short} - -## Step 2: {Second Step Title} - -{Brief introduction to what this step builds on the previous one} - -**Do this**: - -```{language} -# Code to write or run -{code-here} -``` - -**Where to put it**: {Exact file location} - -✅ **Checkpoint**: {How learner knows this step succeeded} - -> **💡 Learning Note**: {Brief explanation} - -## Step 3: {Third Step Title} - -{Continue pattern - each step builds on the last} - -**Do this**: - -{Concrete action} - -✅ **Checkpoint**: {Verification} - -## Step 4: {Continue as needed...} - -{Maintain momentum - learners should see progress at 
every step} - -## What You've Accomplished - -Congratulations! You've completed {the task}. You now have: -- ✅ {Concrete accomplishment #1} -- ✅ {Concrete accomplishment #2} -- ✅ {Concrete accomplishment #3} - -## Next Steps - -Now that you've learned {the basics}, you can: -- **Learn More**: See [Explanation: {Topic}](../explanation/{link}) to understand why this works -- **Solve Problems**: Check [How-To: {Task}](../how-to/{link}) for specific scenarios -- **Reference**: Refer to [{API/CLI}](../reference/{link}) for all options - -## Troubleshooting - -**Problem**: {Common issue learners face} -**Solution**: {Exact steps to fix it} - -**Problem**: {Another common issue} -**Solution**: {Exact steps to fix it} - ---- - -## Write the Docs Best Practices (Remove this section before publishing) - -**Tutorial Principles**: -- ✅ Learning-oriented: Help learners gain competence -- ✅ Allow learner to learn by doing -- ✅ Get learners started quickly with early success -- ✅ Make sure tutorial works reliably -- ✅ Give immediate sense of achievement at each step -- ✅ Ensure learner sees results immediately -- ✅ Make tutorial repeatable and reliable -- ✅ Focus on concrete steps, not abstract concepts -- ✅ Provide minimum necessary explanation (link to Explanation docs) -- ✅ Ignore options and alternatives (focus on the happy path) - -**Accessibility**: -- ✅ Use proper heading hierarchy (H1 → H2 → H3, no skipping) -- ✅ Provide alt text for all images and screenshots -- ✅ Use clear, plain language (avoid jargon, or define it immediately) -- ✅ Code blocks have language tags for syntax highlighting -- ✅ Use descriptive link text (not "click here") - -**Inclusivity**: -- ✅ Use diverse names in examples (not just "John", "Alice") -- ✅ Gender-neutral language where possible -- ✅ Avoid cultural assumptions (not everyone celebrates the same holidays) -- ✅ Consider accessibility needs (keyboard navigation, screen readers) - -**Tutorial-Specific Guidelines**: -- Start with a working 
example, not theory -- Each step should produce a visible result -- Don't explain everything - just enough to complete the task -- Link to Explanation docs for deeper understanding -- Test the tutorial with a beginner (does it work as written?) -- Keep it short - 15-30 minutes maximum -- Use consistent formatting for commands, code, and checkpoints diff --git a/.kittify/missions/documentation/templates/generators/jsdoc.json.template b/.kittify/missions/documentation/templates/generators/jsdoc.json.template deleted file mode 100644 index fa585d031b..0000000000 --- a/.kittify/missions/documentation/templates/generators/jsdoc.json.template +++ /dev/null @@ -1,18 +0,0 @@ -{ - "source": { - "include": ["{source_dir}"], - "includePattern": ".+\\.(js|jsx|ts|tsx)$", - "excludePattern": "(^|\\/|\\\\)_" - }, - "opts": { - "destination": "{output_dir}", - "recurse": true, - "readme": "README.md", - "template": "node_modules/{template}" - }, - "plugins": ["plugins/markdown"], - "templates": { - "cleverLinks": false, - "monospaceLinks": false - } -} diff --git a/.kittify/missions/documentation/templates/generators/sphinx-conf.py.template b/.kittify/missions/documentation/templates/generators/sphinx-conf.py.template deleted file mode 100644 index d1c13bada9..0000000000 --- a/.kittify/missions/documentation/templates/generators/sphinx-conf.py.template +++ /dev/null @@ -1,36 +0,0 @@ -# Sphinx configuration for {project_name} -# Auto-generated by spec-kitty documentation mission - -project = '{project_name}' -author = '{author}' -version = '{version}' -release = version - -# Extensions -extensions = [ - 'sphinx.ext.autodoc', # Auto-generate docs from docstrings - 'sphinx.ext.napoleon', # Support Google/NumPy docstring styles - 'sphinx.ext.viewcode', # Add links to source code - 'sphinx.ext.intersphinx', # Link to other project docs -] - -# Napoleon settings for Google-style docstrings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_init_with_doc 
= True - -# HTML output options -html_theme = '{theme}' -html_static_path = ['_static'] - -# Autodoc options -autodoc_default_options = { - 'members': True, - 'undoc-members': True, - 'show-inheritance': True, -} - -# Path setup -import os -import sys -sys.path.insert(0, os.path.abspath('..')) diff --git a/.kittify/missions/documentation/templates/plan-template.md b/.kittify/missions/documentation/templates/plan-template.md deleted file mode 100644 index fe01c29c24..0000000000 --- a/.kittify/missions/documentation/templates/plan-template.md +++ /dev/null @@ -1,269 +0,0 @@ -# Implementation Plan: [DOCUMENTATION PROJECT] - -**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link] -**Input**: Feature specification from `/kitty-specs/[###-feature-name]/spec.md` - -**Note**: This template is filled in by the `/spec-kitty.plan` command. See mission command templates for execution workflow. - -## Summary - -[Extract from spec: documentation goals, Divio types selected, target audience, generators needed] - -## Technical Context - -**Documentation Framework**: [Sphinx | MkDocs | Docusaurus | Jekyll | Hugo | None (starting fresh) or NEEDS CLARIFICATION] -**Languages Detected**: [Python, JavaScript, Rust, etc. 
- from codebase analysis] -**Generator Tools**: -- JSDoc for JavaScript/TypeScript API reference -- Sphinx for Python API reference (autodoc + napoleon extensions) -- rustdoc for Rust API reference - -**Output Format**: [HTML | Markdown | PDF or NEEDS CLARIFICATION] -**Hosting Platform**: [Read the Docs | GitHub Pages | GitBook | Custom or NEEDS CLARIFICATION] -**Build Commands**: -- `sphinx-build -b html docs/ docs/_build/html/` (Python) -- `npx jsdoc -c jsdoc.json` (JavaScript) -- `cargo doc --no-deps` (Rust) - -**Theme**: [sphinx_rtd_theme | docdash | custom or NEEDS CLARIFICATION] -**Accessibility Requirements**: WCAG 2.1 AA compliance (proper headings, alt text, contrast) - -## Project Structure - -### Documentation (this feature) - -``` -kitty-specs/[###-feature]/ -├── spec.md # Documentation goals and user scenarios -├── plan.md # This file -├── research.md # Phase 0 output (gap analysis, framework research) -├── data-model.md # Phase 1 output (Divio type definitions) -├── quickstart.md # Phase 1 output (getting started guide) -└── tasks.md # Phase 2 output (/spec-kitty.tasks command) -``` - -### Documentation Files (repository root) - -``` -docs/ -├── index.md # Landing page with navigation -├── tutorials/ -│ ├── getting-started.md # Step-by-step for beginners -│ └── [additional-tutorials].md -├── how-to/ -│ ├── authentication.md # Problem-solving guides -│ ├── deployment.md -│ └── [additional-guides].md -├── reference/ -│ ├── api/ # Generated API documentation -│ │ ├── python/ # Sphinx autodoc output -│ │ ├── javascript/ # JSDoc output -│ │ └── rust/ # cargo doc output -│ ├── cli.md # CLI reference (manual) -│ └── config.md # Configuration reference (manual) -├── explanation/ -│ ├── architecture.md # Design decisions and rationale -│ ├── concepts.md # Core concepts explained -│ └── [additional-explanations].md -├── conf.py # Sphinx configuration (if using Sphinx) -├── jsdoc.json # JSDoc configuration (if using JSDoc) -└── Cargo.toml # Rust docs config (if 
using rustdoc) -``` - -**Divio Type Organization**: -- **Tutorials** (`tutorials/`): Learning-oriented, hands-on lessons for beginners -- **How-To Guides** (`how-to/`): Goal-oriented recipes for specific tasks -- **Reference** (`reference/`): Information-oriented technical specifications -- **Explanation** (`explanation/`): Understanding-oriented concept discussions - -## Phase 0: Research - -### Objective - -[For gap-filling mode] Audit existing documentation, classify into Divio types, identify gaps and priorities. -[For initial mode] Research documentation best practices, evaluate framework options, plan structure. - -### Research Tasks - -1. **Documentation Audit** (gap-filling mode only) - - Scan existing documentation directory for markdown files - - Parse frontmatter to classify Divio type - - Build coverage matrix: which features/areas have which documentation types - - Identify high-priority gaps (e.g., no tutorials for key workflows) - - Calculate coverage percentage - -2. **Generator Setup Research** - - Verify JSDoc installed: `npx jsdoc --version` - - Verify Sphinx installed: `sphinx-build --version` - - Verify rustdoc available: `cargo doc --help` - - Research configuration options for each applicable generator - - Plan integration strategy for generated + manual docs - -3. **Divio Template Research** - - Review Write the Docs guidance for each documentation type - - Identify examples of effective tutorials, how-tos, reference, and explanation docs - - Plan section structure appropriate for each type - - Consider target audience knowledge level - -4. 
**Framework Selection** (if starting fresh) - - Evaluate static site generators (Sphinx, MkDocs, Docusaurus, Jekyll, Hugo) - - Consider language ecosystem (Python project → Sphinx, JavaScript → Docusaurus) - - Review hosting options and deployment complexity - - Select theme that meets accessibility requirements - -### Research Output - -See [research.md](research.md) for detailed findings on: -- Gap analysis results (coverage matrix, prioritized gaps) -- Generator configuration research -- Divio template examples -- Framework selection rationale - -## Phase 1: Design - -### Objective - -Define documentation structure, configure generators, plan content outline for each Divio type. - -### Documentation Structure - -**Directory Layout**: -``` -docs/ -├── index.md # Landing page -├── tutorials/ # Learning-oriented -├── how-to/ # Problem-solving -├── reference/ # Technical specs -└── explanation/ # Understanding -``` - -**Navigation Strategy**: -- Landing page links to all four Divio sections -- Each section has clear purpose statement -- Cross-links between types (tutorials → reference, how-tos → explanation) -- Search functionality (if framework supports it) - -### Generator Configurations - -**Sphinx Configuration** (Python): -```python -# docs/conf.py -project = '[PROJECT NAME]' -extensions = [ - 'sphinx.ext.autodoc', # Generate docs from docstrings - 'sphinx.ext.napoleon', # Support Google/NumPy docstring styles - 'sphinx.ext.viewcode', # Add source code links - 'sphinx.ext.intersphinx', # Link to other projects' docs -] -html_theme = 'sphinx_rtd_theme' -html_static_path = ['_static'] -``` - -**JSDoc Configuration** (JavaScript): -```json -{ - "source": { - "include": ["src/"], - "includePattern": ".+\\.js$", - "excludePattern": "(node_modules/|test/)" - }, - "opts": { - "destination": "docs/reference/api/javascript", - "template": "node_modules/docdash", - "recurse": true - }, - "plugins": ["plugins/markdown"] -} -``` - -**rustdoc Configuration** (Rust): 
-```toml -# Cargo.toml -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--document-private-items"] -``` - -### Content Outline - -**Tutorials** (WP02 in tasks): -- Getting Started (installation, first use, basic concepts) -- [Additional tutorials based on key user journeys] - -**How-To Guides** (WP03 in tasks): -- How to [solve specific problem 1] -- How to [solve specific problem 2] -- [Additional guides based on common tasks] - -**Reference** (WP04 in tasks): -- API Reference (generated from code) -- CLI Reference (manual) -- Configuration Reference (manual) - -**Explanation** (WP05 in tasks): -- Architecture Overview (design decisions, system structure) -- Core Concepts (domain concepts explained) -- [Additional explanations as needed] - -### Work Breakdown Preview - -Detailed work packages will be generated in Phase 2 (tasks.md). High-level packages: - -1. **WP01: Documentation Structure Setup** - Create directories, configure generators, set up build -2. **WP02: Tutorial Documentation** - Write learning-oriented tutorials -3. **WP03: How-To Guide Documentation** - Write problem-solving guides -4. **WP04: Reference Documentation** - Generate API docs, write manual reference -5. **WP05: Explanation Documentation** - Write understanding-oriented explanations -6. **WP06: Quality Validation & Publishing** - Validate accessibility, build, deploy - -## Phase 2: Implementation - -**Note**: Phase 2 (work package generation) is handled by the `/spec-kitty.tasks` command. 
- -## Success Criteria Validation - -Validating against spec.md success criteria: - -- **SC-001** (findability): Structured navigation and search enable quick information access -- **SC-002** (accessibility): Templates enforce proper headings, alt text, clear language -- **SC-003** (API completeness): Generators ensure comprehensive API coverage -- **SC-004** (task completion): Tutorials and how-tos enable users to succeed independently -- **SC-005** (build quality): Documentation builds without errors or warnings - -## Constitution Check - -*GATE: Documentation mission requires adherence to Write the Docs best practices and Divio principles.* - -**Write the Docs Principles**: -- Documentation as code (version controlled, reviewed, tested) -- Accessible language (clear, plain, bias-free) -- User-focused (written for audience, not developers) -- Maintained (updated with code changes) - -**Divio Documentation System**: -- Four distinct types with clear purposes -- Learning-oriented tutorials -- Goal-oriented how-tos -- Information-oriented reference -- Understanding-oriented explanations - -**Accessibility Standards**: -- WCAG 2.1 AA compliance -- Proper heading hierarchy -- Alt text for all images -- Sufficient color contrast -- Keyboard navigation support - -## Risks & Dependencies - -**Risks**: -- Documentation becomes outdated as code evolves -- Generated documentation quality depends on code comment quality -- Accessibility requirements may require manual auditing -- Framework limitations may restrict functionality - -**Dependencies**: -- Generator tools must be installed in development environment -- Code must have comments/docstrings for reference generation -- Hosting platform must be available and accessible -- Build pipeline must support documentation generation diff --git a/.kittify/missions/documentation/templates/release-template.md b/.kittify/missions/documentation/templates/release-template.md deleted file mode 100644 index 8998c252f7..0000000000 --- 
a/.kittify/missions/documentation/templates/release-template.md +++ /dev/null @@ -1,222 +0,0 @@ -# Release: {documentation_title} - -**Documentation Mission**: {mission_name} -**Release Date**: {release_date} -**Version**: {version} - -> **Purpose**: This document captures the publish and handoff details for this documentation effort. Use it to record hosting configuration, deployment steps, and ownership information. - ---- - -## Hosting Target - -**Platform**: {platform} - - -**Production URL**: {production_url} - - -**Staging URL** (if applicable): {staging_url} - - -**Domain Configuration**: -- Custom domain: {custom_domain} (or N/A) -- DNS provider: {dns_provider} -- SSL/TLS: {ssl_configuration} - ---- - -## Build Output - -**Build Command**: -```bash -{build_command} -``` - - -**Output Directory**: `{output_directory}` - - -**Build Requirements**: -- {requirement_1} -- {requirement_2} - - -**Build Time**: ~{build_time} seconds - - ---- - -## Deployment Steps - -### Automated Deployment (if configured) - -**CI/CD Platform**: {ci_cd_platform} - - -**Trigger**: {deployment_trigger} - - -**Workflow File**: `{workflow_file_path}` - - -### Manual Deployment Steps - -If automated deployment is not available, follow these steps: - -1. **Build documentation locally**: - ```bash - {manual_build_step_1} - ``` - -2. **Verify build output**: - ```bash - {manual_verify_step} - ``` - -3. **Deploy to hosting**: - ```bash - {manual_deploy_step} - ``` - -4. 
**Verify live site**: - - Navigate to {production_url} - - Check all pages load correctly - - Verify navigation works - - Test search functionality (if applicable) - ---- - -## Configuration Files - -**Key Configuration Locations**: - -| File | Purpose | Location | -|------|---------|----------| -| {config_file_1} | {purpose_1} | `{location_1}` | -| {config_file_2} | {purpose_2} | `{location_2}` | - - - ---- - -## Access & Credentials - -**Hosting Platform Access**: -- Login URL: {platform_login_url} -- Access method: {access_method} - -- Credentials stored: {credential_location} - - -**Required Permissions**: -- {permission_1} -- {permission_2} - - -**Team Members with Access**: -- {name_1} - {role_1} - {email_1} -- {name_2} - {role_2} - {email_2} - ---- - -## Ownership & Maintenance - -**Primary Maintainer**: {primary_maintainer_name} -**Contact**: {primary_maintainer_contact} -**Backup Maintainer**: {backup_maintainer_name} - -**Maintenance Schedule**: -- Documentation reviews: {review_frequency} - -- Dependency updates: {dependency_update_frequency} - -- Content refresh: {content_refresh_frequency} - - -**Known Issues**: -- {known_issue_1} -- {known_issue_2} - - ---- - -## Monitoring & Analytics - -**Analytics Platform**: {analytics_platform} - - -**Dashboard URL**: {analytics_dashboard_url} - -**Key Metrics**: -- Page views tracked: {yes_no} -- Search queries tracked: {yes_no} -- User feedback collected: {yes_no} - -**Monitoring**: -- Uptime monitoring: {uptime_service} - -- Build status: {build_status_url} - - ---- - -## Handoff Checklist - -Use this checklist when transferring documentation ownership: - -- [ ] New maintainer has access to hosting platform -- [ ] New maintainer can build documentation locally -- [ ] New maintainer has credentials to all required services -- [ ] New maintainer understands deployment process -- [ ] Build and deploy have been demonstrated -- [ ] Known issues and workarounds explained -- [ ] Contact information updated in this 
document -- [ ] Team notification sent about ownership change - ---- - -## Troubleshooting - -### Build Fails - -**Symptom**: {build_error_symptom} -**Cause**: {likely_cause} -**Solution**: {fix_steps} - -### Deployment Fails - -**Symptom**: {deploy_error_symptom} -**Cause**: {likely_cause} -**Solution**: {fix_steps} - -### Site Not Updating - -**Symptom**: Changes committed but not visible on live site -**Causes**: -- Cache not cleared -- Deployment pipeline failed silently -- Wrong branch deployed - -**Solutions**: -- Check CI/CD logs for errors -- Clear browser cache and CDN cache -- Verify correct branch is configured for deployment - ---- - -## Additional Resources - -- **Documentation Source**: {repo_url} -- **Issue Tracker**: {issue_tracker_url} -- **Team Chat**: {chat_channel} -- **Internal Docs**: {internal_docs_url} - ---- - -**Notes**: - diff --git a/.kittify/missions/documentation/templates/spec-template.md b/.kittify/missions/documentation/templates/spec-template.md deleted file mode 100644 index 96da7fe6c9..0000000000 --- a/.kittify/missions/documentation/templates/spec-template.md +++ /dev/null @@ -1,172 +0,0 @@ -# Feature Specification: Documentation Project - [PROJECT NAME] - - -**Feature Branch**: `[###-feature-name]` -**Created**: [DATE] -**Status**: Draft -**Mission**: documentation -**Input**: User description: "$ARGUMENTS" - -## Documentation Scope - -**Iteration Mode**: [NEEDS CLARIFICATION: initial | gap-filling | feature-specific] -**Target Audience**: [NEEDS CLARIFICATION: developers integrating library | end users | contributors | operators] -**Selected Divio Types**: [NEEDS CLARIFICATION: Which of tutorial, how-to, reference, explanation?] -**Languages Detected**: [Auto-detected during planning - JavaScript, Python, Rust, etc.] 
-**Generators to Use**: [Based on languages - JSDoc, Sphinx, rustdoc] - -### Gap Analysis Results *(for gap-filling mode only)* - -**Existing Documentation**: -- [List current docs and their Divio types] -- Example: `README.md` - explanation (partial) -- Example: `API.md` - reference (outdated) - -**Identified Gaps**: -- [Missing Divio types or outdated content] -- Example: No tutorial for getting started -- Example: Reference docs don't cover new v2 API - -**Coverage Percentage**: [X%] *(calculated from gap analysis)* - -## User Scenarios & Testing *(mandatory)* - - - -### User Story 1 - [Documentation Consumer Need] (Priority: P1) - -[Describe who needs the documentation and what they want to accomplish] - -**Why this priority**: [Explain value - e.g., "New users can't adopt the library without a tutorial"] - -**Independent Test**: [How to verify documentation achieves the goal] -- Example: "New developer with no prior knowledge can complete getting-started tutorial in under 15 minutes" - -**Acceptance Scenarios**: - -1. **Given** [user's starting state], **When** [they read/follow this documentation], **Then** [they accomplish their goal] -2. **Given** [documentation exists], **When** [user searches for information], **Then** [they find it within X clicks] - ---- - -### User Story 2 - [Documentation Consumer Need] (Priority: P2) - -[Describe the second most important documentation need] - -**Why this priority**: [Explain value] - -**Independent Test**: [Describe how this can be tested independently] - -**Acceptance Scenarios**: - -1. **Given** [initial state], **When** [action], **Then** [expected outcome] - ---- - -### User Story 3 - [Documentation Consumer Need] (Priority: P3) - -[Describe the third most important documentation need] - -**Why this priority**: [Explain value] - -**Independent Test**: [Describe how this can be tested independently] - -**Acceptance Scenarios**: - -1. 
**Given** [initial state], **When** [action], **Then** [expected outcome] - ---- - -[Add more user stories as needed, each with an assigned priority] - -### Edge Cases - -- What happens when documentation becomes outdated after code changes? -- How do users find information that doesn't fit standard Divio types? -- What if generated documentation conflicts with manually-written documentation? - -## Requirements *(mandatory)* - -### Functional Requirements - -#### Documentation Content - -- **FR-001**: Documentation MUST include [tutorial | how-to | reference | explanation] for [feature/area] -- **FR-002**: Documentation MUST be accessible (proper heading hierarchy, alt text for images, clear language) -- **FR-003**: Documentation MUST use bias-free language and inclusive examples -- **FR-004**: Documentation MUST provide working code examples for all key use cases - -*Example of marking unclear requirements:* - -- **FR-005**: Documentation MUST cover [NEEDS CLARIFICATION: which features? all public APIs? core features only?] 
- -#### Generation Requirements *(if using generators)* - -- **FR-006**: System MUST generate API reference from [JSDoc comments | Python docstrings | Rust doc comments] -- **FR-007**: Generated documentation MUST integrate seamlessly with manually-written documentation -- **FR-008**: Generator configuration MUST be version-controlled and reproducible - -#### Gap-Filling Requirements *(if gap-filling mode)* - -- **FR-009**: Gap analysis MUST identify missing Divio types across all documentation areas -- **FR-010**: Gap analysis MUST detect API reference docs that are outdated compared to current code -- **FR-011**: System MUST prioritize gaps by user impact (critical, high, medium, low) - -### Key Entities - -- **Divio Documentation Type**: One of tutorial, how-to, reference, explanation - each with distinct purpose and characteristics -- **Documentation Generator**: Tool that creates reference documentation from code comments (JSDoc for JavaScript, Sphinx for Python, rustdoc for Rust) -- **Gap Analysis**: Assessment identifying missing or outdated documentation, with coverage metrics -- **Documentation Template**: Structured template following Divio principles for a specific documentation type - -## Success Criteria *(mandatory)* - -### Measurable Outcomes - -- **SC-001**: Users can find information they need within [X] clicks/searches -- **SC-002**: Documentation passes accessibility checks (proper heading hierarchy, alt text for images, clear language) -- **SC-003**: API reference is [X]% complete (all public APIs documented) -- **SC-004**: [X]% of users successfully complete tasks using documentation alone (measure via user testing) -- **SC-005**: Documentation build completes with zero warnings or errors - -### Quality Gates - -- All images have descriptive alt text -- Heading hierarchy is proper (H1 → H2 → H3, no skipping levels) -- No broken links (internal or external) -- All code examples have been tested and work -- Spelling and grammar are correct - -## 
Assumptions - -- **ASM-001**: Project has code comments/docstrings for reference generation to be valuable -- **ASM-002**: Users are willing to maintain documentation alongside code changes -- **ASM-003**: Documentation will be hosted on [platform] using [static site generator] -- **ASM-004**: Target audience has [technical background level] and familiarity with [technologies] - -## Out of Scope - -The following are explicitly NOT included in this documentation project: - -- Documentation hosting/deployment infrastructure (generates source files only) -- Documentation analytics and metrics collection (page views, search queries, time on page) -- AI-powered content generation (templates have placeholders, but content is human-written) -- Interactive documentation features (try-it-now API consoles, code playgrounds, live demos) -- Automatic documentation updates when code changes (manual maintenance required) -- Translation/localization to other languages -- Video tutorials or screencasts -- PDF or print-optimized formats (unless explicitly requested) - -## Constraints - -- Documentation must be maintained as code changes -- Generated documentation is only as good as code comments -- Static site generators have limitations on interactivity -- Some documentation types (tutorials especially) require significant manual effort -- Documentation must remain accurate - outdated docs are worse than no docs diff --git a/.kittify/missions/documentation/templates/task-prompt-template.md b/.kittify/missions/documentation/templates/task-prompt-template.md deleted file mode 100644 index eeeafc1c65..0000000000 --- a/.kittify/missions/documentation/templates/task-prompt-template.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -work_package_id: "WPxx" -subtasks: - - "Txxx" -title: "Replace with work package title" -phase: "Phase N - Replace with phase name" -lane: "planned" # DO NOT EDIT - use: spec-kitty agent tasks move-task --to -assignee: "" # Optional friendly name when in 
doing/for_review -agent: "" # CLI agent identifier (claude, codex, etc.) -shell_pid: "" # PID captured when the task moved to the current lane -review_status: "" # empty | has_feedback | acknowledged (populated by reviewers/implementers) -reviewed_by: "" # Agent ID of the reviewer (if reviewed) -history: - - timestamp: "{{TIMESTAMP}}" - lane: "planned" - agent: "system" - shell_pid: "" - action: "Prompt generated via /spec-kitty.tasks" ---- - -# Work Package Prompt: {{work_package_id}} – {{title}} - -## ⚠️ IMPORTANT: Review Feedback Status - -**Read this first if you are implementing this task!** - -- **Has review feedback?**: Check the `review_status` field above. If it says `has_feedback`, scroll to the **Review Feedback** section immediately (right below this notice). -- **You must address all feedback** before your work is complete. Feedback items are your implementation TODO list. -- **Mark as acknowledged**: When you understand the feedback and begin addressing it, update `review_status: acknowledged` in the frontmatter. -- **Report progress**: As you address each feedback item, update the Activity Log explaining what you changed. - ---- - -## Review Feedback - -> **Populated by `/spec-kitty.review`** – Reviewers add detailed feedback here when work needs changes. Implementation must address every item listed below before returning for re-review. - -*[This section is empty initially. Reviewers will populate it if the work is returned from review. If you see feedback here, treat each item as a must-do before completion.]* - ---- - -## Markdown Formatting -Wrap HTML/XML tags in backticks: `` `
` ``, `` `