From 422e587db13c213ec37ba12c7d47a1d333d8f880 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Darko=20Mijic=CC=81?= Date: Fri, 6 Mar 2026 13:27:03 +0100 Subject: [PATCH 1/3] fix: allow unlock-reason to bypass FSM transition checks for existing files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The decider previously only bypassed FSM checks for new files with unlock-reason, blocking legitimate retroactive completions of existing specs. This removes the isNewFile constraint, making unlock-reason a universal FSM escape hatch for both new files and existing files. Adds test scenario verifying existing file with unlock-reason bypasses FSM check for roadmap → completed transition. --- src/lint/process-guard/decider.ts | 4 +- .../features/validation/process-guard.feature | 8 +++ tests/steps/validation/process-guard.steps.ts | 50 ++++++++++++++++++- 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/src/lint/process-guard/decider.ts b/src/lint/process-guard/decider.ts index f9dcb0c0..4ce60d4a 100644 --- a/src/lint/process-guard/decider.ts +++ b/src/lint/process-guard/decider.ts @@ -298,8 +298,8 @@ function checkStatusTransitions(state: ProcessState, changes: ChangeDetection): const violations: ProcessViolation[] = []; for (const [file, transition] of changes.statusTransitions) { - // New files with unlock-reason bypass FSM check (supports file splits/reorganization) - if (transition.isNewFile === true && transition.hasUnlockReason === true) { + // Files with unlock-reason bypass FSM check (supports retroactive completions and file splits) + if (transition.hasUnlockReason === true) { continue; } diff --git a/tests/features/validation/process-guard.feature b/tests/features/validation/process-guard.feature index c3e0703e..baad1889 100644 --- a/tests/features/validation/process-guard.feature +++ b/tests/features/validation/process-guard.feature @@ -122,6 +122,14 @@ Feature: Process Guard Linter | completed | roadmap | | completed | deferred | + @rule:invalid-status-transition + Scenario: Existing file with unlock-reason bypasses FSM check + Given a file "specs/feature.feature" with status "roadmap" + And the file has unlock-reason "Retroactive-completion" + When the status changes to "completed" with unlock-reason + And validating changes + Then no "invalid-status-transition" violation is reported + # ========================================================================== # scope-creep Rule # ========================================================================== diff --git a/tests/steps/validation/process-guard.steps.ts b/tests/steps/validation/process-guard.steps.ts index f0a2021e..4c9abc6e 100644 --- a/tests/steps/validation/process-guard.steps.ts +++ b/tests/steps/validation/process-guard.steps.ts @@ -359,7 +359,7 @@ describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { // invalid-status-transition Rule // =========================================================================== - Rule('Status transitions must follow PDR-005 FSM', ({ RuleScenarioOutline }) => { + Rule('Status transitions must follow PDR-005 FSM', ({ RuleScenario, RuleScenarioOutline }) => { RuleScenarioOutline( 'Valid transitions pass validation', ({ Given, When, And, Then }, variables: { from: string; to: string }) => { @@ -435,6 +435,54 @@ describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { }); } ); + + RuleScenario( + 'Existing file with unlock-reason bypasses FSM check', + ({ Given, And, When, Then }) => { + Given( + 'a file "specs/feature.feature" with status {string}', + (_ctx: unknown, status: string) => { + const fileState = createFileState( + 'specs/feature.feature', + status as ProcessStatusValue + ); + state!.files.set('specs/feature.feature', fileState); + state!.currentFile = 'specs/feature.feature'; + } + ); + + And('the file has unlock-reason {string}', (_ctx: unknown, reason: string) => { + const existing = state!.files.get(state!.currentFile)!; + const updated = createFileState(existing.relativePath, existing.status, { + deliverables: existing.deliverables, + hasUnlockReason: true, + unlockReason: reason, + }); + state!.files.set(state!.currentFile, updated); + }); + + When( + 'the status changes to {string} with unlock-reason', + (_ctx: unknown, toStatus: string) => { + state!.modifiedFiles.push(state!.currentFile); + state!.statusTransitions.set(state!.currentFile, { + from: 'roadmap' as ProcessStatusValue, + to: toStatus as ProcessStatusValue, + hasUnlockReason: true, + }); + } + ); + + And('validating changes', () => { + executeValidation(); + }); + + Then('no "invalid-status-transition" violation is reported', () => { + const violation = getViolationForRule('invalid-status-transition'); + expect(violation).toBeUndefined(); + }); + } + ); }); // =========================================================================== From 12ae69696c66c5612cfbbee2773af860c2487915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Darko=20Mijic=CC=81?= Date: Fri, 6 Mar 2026 13:27:27 +0100 Subject: [PATCH 2/3] =?UTF-8?q?chore:=20roadmap=20planning=20=E2=80=94=206?= =?UTF-8?q?=20spec=20closures,=203=20new=20roadmap=20specs,=20FSM=20short-?= =?UTF-8?q?circuit?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Spec closures (retroactive completions): - ArchitectureDiagramAdvanced, ArchitectureDiagramCore → completed - DocsConsolidationStrategy → completed (all 16 deliverables) - StepLintExtendedRules → completed (all 6 deliverables) - KebabCaseSlugs, RichContentHelpersTesting → completed - DataAPIPlatformIntegration → completed (split into dedicated specs) New roadmap specs: - SetupCommand (Phase 45) — interactive project initialization - MCPServerIntegration (Phase 46) — MCP server for Claude Code - MonorepoSupport (Phase 100) — cross-package queries (deferred) Other changes: - TraceabilityGenerator spec rewritten for codec architecture - FSM short-circuit in process-api CLI (~2x faster for static queries) - lint-steps help text updated (8 → 12 rules) - INDEX.md broken link fixed (PUBLISHING.md → MAINTAINERS.md) - Deleted obsolete _claude-md/workflow/session-workflows.md --- _claude-md/workflow/session-workflows.md | 97 -------- .../architecture-diagram-advanced.feature | 21 +- .../specs/architecture-diagram-core.feature | 3 +- .../specs/data-api-cli-ergonomics.feature | 2 +- .../data-api-platform-integration.feature | 25 +- .../specs/docs-consolidation-strategy.feature | 11 +- .../specs/mcp-server-integration.feature | 202 ++++++++++++++++ .../specs/monorepo-support.feature | 162 +++++++++++++ delivery-process/specs/setup-command.feature | 209 +++++++++++++++++ .../specs/step-lint-extended-rules.feature | 15 +- .../specs/traceability-enhancements.feature | 1 + .../specs/traceability-generator.feature | 221 +++++++++--------- docs/INDEX.md | 17 +- src/cli/lint-steps.ts | 4 + src/cli/process-api.ts | 118 ++++++++++ .../behavior/kebab-case-slugs.feature | 14 +- .../behavior/rich-content-helpers.feature | 16 +- .../steps/behavior/kebab-case-slugs.steps.ts | 8 +- .../behavior/rich-content-helpers.steps.ts | 8 +- 19 files changed, 897 insertions(+), 257 deletions(-) delete mode 100644 _claude-md/workflow/session-workflows.md create mode 100644 delivery-process/specs/mcp-server-integration.feature create mode 100644 delivery-process/specs/monorepo-support.feature create mode 100644 delivery-process/specs/setup-command.feature diff --git a/_claude-md/workflow/session-workflows.md b/_claude-md/workflow/session-workflows.md deleted file mode 100644 index f8f8b4c6..00000000 --- a/_claude-md/workflow/session-workflows.md +++ /dev/null @@ -1,97 +0,0 @@ -### SessionGuidesModuleSource - -#### SESSION-GUIDES.md is the authoritative public human reference - -**Invariant:** `docs/SESSION-GUIDES.md` exists and is not deleted, shortened, or replaced with a redirect. Its comprehensive checklists, CLI command examples, and session decision trees serve developers on libar.dev. - -**Rationale:** Session workflow guidance requires two formats for two audiences. Public developers need comprehensive checklists with full examples. AI sessions need compact invariants they can apply without reading 389 lines. - -#### CLAUDE.md session workflow content is derived, not hand-authored - -**Invariant:** After Phase 39 generation deliverables complete, the "Session Workflows" section in CLAUDE.md contains no manually-authored content. It is composed from generated `_claude-md/workflow/` modules. - -**Rationale:** A hand-maintained CLAUDE.md session section creates two copies of session workflow guidance with no synchronization mechanism. Regeneration from annotated source eliminates drift. - -#### Session type determines artifacts and FSM changes - -**Invariant:** Four session types exist, each with defined input, output, and FSM impact. Mixing outputs across session types (e.g., writing code in a planning session) violates session discipline. - -**Rationale:** Session type confusion causes wasted work — a design mistake discovered mid-implementation wastes the entire session. Clear contracts prevent scope bleeding between session types. - -| Session | Input | Output | FSM Change | -| ----------------- | ------------------- | --------------------------- | ------------------------------ | -| Planning | Pattern brief | Roadmap spec (.feature) | Creates roadmap | -| Design | Complex requirement | Decision specs + code stubs | None | -| Implementation | Roadmap spec | Code + tests | roadmap to active to completed | -| Planning + Design | Pattern brief | Spec + stubs | Creates roadmap | - -#### Planning sessions produce roadmap specs only - -**Invariant:** A planning session creates a roadmap spec with metadata, deliverables table, Rule: blocks with invariants, and scenarios. It must not produce implementation code, transition to active, or prompt for implementation readiness. - -**Rationale:** Planning is the cheapest session type — it produces .feature file edits, no compilation needed. Mixing implementation into planning defeats the cost advantage and introduces untested code without a locked scope. - -| Do | Do NOT | -| --------------------------------------------------- | -------------------------- | -| Extract metadata from pattern brief | Create .ts implementation | -| Create spec file with proper tags | Transition to active | -| Add deliverables table in Background | Ask Ready to implement | -| Convert constraints to Rule: blocks | Write full implementations | -| Add scenarios: 1 happy-path + 1 validation per Rule | | - -#### Design sessions produce decisions and stubs only - -**Invariant:** A design session makes architectural decisions and creates code stubs with interfaces. It must not produce implementation code. Context gathering via the Process Data API must precede any explore agent usage. - -**Rationale:** Design sessions resolve ambiguity before implementation begins. Code stubs in delivery-process/stubs/ live outside src/ to avoid TypeScript compilation and ESLint issues, making them zero-risk artifacts. - -| Use Design Session | Skip Design Session | -| -------------------------- | ------------------- | -| Multiple valid approaches | Single obvious path | -| New patterns/capabilities | Bug fix | -| Cross-context coordination | Clear requirements | - -#### Implementation sessions follow FSM-enforced execution order - -**Invariant:** Implementation sessions must follow a strict 5-step execution order. Transition to active must happen before any code changes. Transition to completed must happen only when ALL deliverables are done. Skipping steps causes Process Guard rejection at commit time. - -**Rationale:** The execution order ensures FSM state accurately reflects work state at every point. Writing code before transitioning to active means Process Guard sees changes to a roadmap spec (no scope protection). Marking completed with incomplete work creates a hard-locked state that requires unlock-reason to fix. - -| Do NOT | Why | -| ----------------------------------- | --------------------------------------- | -| Add new deliverables to active spec | Scope-locked state prevents scope creep | -| Mark completed with incomplete work | Hard-locked state cannot be undone | -| Skip FSM transitions | Process Guard will reject | -| Edit generated docs directly | Regenerate from source | - -#### FSM errors have documented fixes - -**Invariant:** Every Process Guard error code has a defined cause and fix. The error codes, causes, and fixes form a closed set — no undocumented error states exist. - -**Rationale:** Undocumented FSM errors cause session-blocking confusion. A lookup table from error code to fix eliminates guesswork and prevents workarounds that bypass process integrity. - -| Error | Cause | Fix | -| ------------------------- | ---------------------------------------------- | ------------------------------------------- | -| completed-protection | File has completed status but no unlock tag | Add libar-docs-unlock-reason tag | -| invalid-status-transition | Skipped FSM state (e.g., roadmap to completed) | Follow path: roadmap to active to completed | -| scope-creep | Added deliverable to active spec | Remove deliverable OR revert to roadmap | -| session-scope (warning) | Modified file outside session scope | Add to scope OR use --ignore-session | -| session-excluded | Modified excluded pattern during session | Remove from exclusion OR override | - -| Situation | Solution | Example | -| ---------------------------- | --------------------- | -------------------------------------- | -| Fix bug in completed spec | Add unlock reason tag | libar-docs-unlock-reason:Fix-typo | -| Modify outside session scope | Use ignore flag | lint-process --staged --ignore-session | -| CI treats warnings as errors | Use strict flag | lint-process --all --strict | - -#### Handoff captures session-end state for continuity - -**Invariant:** Multi-session work requires handoff documentation generated from the Process Data API. Handoff output always reflects actual annotation state, not manual notes. - -**Rationale:** Manual session notes drift from actual deliverable state. The handoff command derives state from annotations, ensuring the next session starts from ground truth rather than stale notes. - -#### ClaudeModuleGeneration is the generation mechanism - -**Invariant:** Phase 39 depends on ClaudeModuleGeneration (Phase 25). Adding `@libar-docs-claude-module` and `@libar-docs-claude-section:workflow` tags to this spec will cause ClaudeModuleGeneration to produce `_claude-md/workflow/` output files. The hand-written `_claude-md/workflow/` files are deleted after successful verified generation. - -**Rationale:** The annotation work (Rule blocks in this spec) is immediately useful — queryable via `pnpm process:query -- rules`. Generation deliverables cannot complete until Phase 25 ships the ClaudeModuleCodec. This sequencing is intentional: the annotation investment has standalone value regardless of whether the codec exists yet. diff --git a/delivery-process/specs/architecture-diagram-advanced.feature b/delivery-process/specs/architecture-diagram-advanced.feature index 9ce2ffb5..e3cba038 100644 --- a/delivery-process/specs/architecture-diagram-advanced.feature +++ b/delivery-process/specs/architecture-diagram-advanced.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:ArchitectureDiagramAdvanced -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:23 @libar-docs-effort:1w @libar-docs-product-area:Generation @@ -29,9 +30,9 @@ Feature: Architecture Diagram Generation - Advanced Given the following deliverables: | Deliverable | Status | Location | Tests | Test Type | | ArchitectureCodec (layered) | complete | renderable/codecs/architecture.ts | Yes | unit | - | Architecture generator | pending | generators/built-in/architecture.ts | Yes | unit | - | Example app annotations | pending | examples/sample-project/src/ | No | - | - | Sequence diagram support | pending | renderable/codecs/architecture.ts | Yes | unit | + | Architecture generator | complete | generators/built-in/codec-generators.ts | Yes | unit | + | Example app annotations | n/a | examples/sample-project/src/ | No | - | + | Sequence diagram support | superseded | renderable/codecs/architecture.ts | Yes | unit | # ============================================================================ # RULE 5: Layered Diagram Generation @@ -43,9 +44,9 @@ Feature: Architecture Diagram Generation - Advanced layer (domain, application, infrastructure) with top-to-bottom flow. **Rationale:** Layered architecture visualization shows dependency direction - - infrastructure at top, domain at bottom - following conventional layer ordering. + domain at top, infrastructure at bottom - following conventional layer ordering. - **Verified by:** Generate subgraphs per layer, Layer order is infrastructure-application-domain, + **Verified by:** Generate subgraphs per layer, Layer order is domain-application-infrastructure, Include context label in node names @acceptance-criteria @happy-path @@ -61,11 +62,11 @@ Feature: Architecture Diagram Generation - Advanced And output contains subgraph "Infrastructure Layer" @acceptance-criteria @happy-path - Scenario: Layer order is infrastructure-application-domain + Scenario: Layer order is domain-application-infrastructure Given patterns with all three layers When the layered diagram codec runs - Then Infrastructure Layer appears before Application Layer in output - And Application Layer appears before Domain Layer in output + Then Domain Layer appears before Application Layer in output + And Application Layer appears before Infrastructure Layer in output @acceptance-criteria @happy-path Scenario: Include context label in node names @@ -118,7 +119,7 @@ Feature: Architecture Diagram Generation - Advanced Scenario: Generator option for layered diagram When running generate-docs with --generators architecture --diagram-type layered Then output contains layer subgraphs - And output follows infrastructure-application-domain order + And output follows domain-application-infrastructure order @acceptance-criteria @happy-path Scenario: Generator option for context filtering diff --git a/delivery-process/specs/architecture-diagram-core.feature b/delivery-process/specs/architecture-diagram-core.feature index f342152c..a5aa7b31 100644 --- a/delivery-process/specs/architecture-diagram-core.feature +++ b/delivery-process/specs/architecture-diagram-core.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:ArchitectureDiagramCore -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:23 @libar-docs-effort:1w @libar-docs-product-area:Generation diff --git a/delivery-process/specs/data-api-cli-ergonomics.feature b/delivery-process/specs/data-api-cli-ergonomics.feature index f764eeb2..fd55db4f 100644 --- a/delivery-process/specs/data-api-cli-ergonomics.feature +++ b/delivery-process/specs/data-api-cli-ergonomics.feature @@ -37,7 +37,7 @@ Feature: Data API CLI Ergonomics - Performance and Interactive Mode | Deliverable | Status | Location | Tests | Test Type | | MasterDataset cache with mtime invalidation | pending | src/cli/dataset-cache.ts | Yes | unit | | REPL mode handler | pending | src/cli/repl.ts | Yes | integration | - | FSM short-circuit for static queries | pending | src/cli/process-api.ts | Yes | unit | + | FSM short-circuit for static queries | complete | src/cli/process-api.ts | Yes | unit | | Per-subcommand help system | pending | src/cli/process-api.ts | Yes | integration | | Dry-run mode | pending | src/cli/process-api.ts | Yes | integration | | Validation summary in metadata | pending | src/cli/process-api.ts | Yes | unit | diff --git a/delivery-process/specs/data-api-platform-integration.feature b/delivery-process/specs/data-api-platform-integration.feature index 69659a09..ad7eefab 100644 --- a/delivery-process/specs/data-api-platform-integration.feature +++ b/delivery-process/specs/data-api-platform-integration.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:DataAPIPlatformIntegration -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Split-into-dedicated-specs @libar-docs-phase:25d @libar-docs-product-area:DataAPI @libar-docs-effort:3d @@ -31,17 +32,23 @@ Feature: Data API Platform Integration - MCP Server and Monorepo Support | Cross-package views | Understand monorepo-wide dependencies | | Package-scoped queries | Focus on specific packages | + **Superseded:** This spec has been split into focused specs: + - MCPServerIntegration (Phase 46) -- MCP server mode (Rule 1) + - MonorepoSupport (Phase 100) -- Cross-package queries (Rule 3) + - Rule 2 (CLAUDE.md context layer) absorbed into existing ClaudeModuleGeneration + - Rule 4 (git hooks/watch) partially exists in lint-process, watch mode deferred + Background: Deliverables Given the following deliverables: | Deliverable | Status | Location | Tests | Test Type | - | MCP server entry point | pending | src/mcp/server.ts | Yes | integration | - | MCP tool definitions | pending | src/mcp/tools.ts | Yes | unit | - | MCP session state management | pending | src/mcp/session.ts | Yes | unit | - | CLAUDE.md context layer generator | pending | src/generators/claude-md-generator.ts | Yes | unit | - | Cross-package dependency analyzer | pending | src/api/cross-package.ts | Yes | unit | - | Package-scoped filter flag | pending | src/cli/process-api.ts | Yes | integration | - | Multi-package config support | pending | src/config/multi-package.ts | Yes | unit | - | Per-package coverage report | pending | src/api/coverage-analyzer.ts | Yes | unit | + | MCP server entry point | superseded | src/mcp/server.ts | Yes | integration | + | MCP tool definitions | superseded | src/mcp/tools.ts | Yes | unit | + | MCP session state management | superseded | src/mcp/session.ts | Yes | unit | + | CLAUDE.md context layer generator | superseded | src/generators/claude-md-generator.ts | Yes | unit | + | Cross-package dependency analyzer | superseded | src/api/cross-package.ts | Yes | unit | + | Package-scoped filter flag | superseded | src/cli/process-api.ts | Yes | integration | + | Multi-package config support | superseded | src/config/multi-package.ts | Yes | unit | + | Per-package coverage report | superseded | src/api/coverage-analyzer.ts | Yes | unit | # ============================================================================ # RULE 1: MCP Server Mode diff --git a/delivery-process/specs/docs-consolidation-strategy.feature b/delivery-process/specs/docs-consolidation-strategy.feature index 9bc09bcf..f3e5c65d 100644 --- a/delivery-process/specs/docs-consolidation-strategy.feature +++ b/delivery-process/specs/docs-consolidation-strategy.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:DocsConsolidationStrategy -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:35 @libar-docs-effort:4w @libar-docs-product-area:Generation @@ -50,14 +51,14 @@ Feature: Documentation Consolidation Strategy Given the following deliverables: | Deliverable | Status | Location | Tests | Test Type | | Preamble capability on ReferenceDocConfig | complete | src/renderable/codecs/reference.ts | Yes | unit | - | Phase 1 - Taxonomy consolidation | pending | docs/TAXONOMY.md | No | n/a | + | Phase 1 - Taxonomy consolidation | complete | docs/TAXONOMY.md | No | n/a | | Phase 2 - Codec listings extraction | complete | delivery-process.config.ts, src/renderable/codecs/*.ts | Yes | integration | | Phase 3 - Process Guard consolidation | complete | src/renderable/codecs/validation-rules.ts | Yes | integration | | Phase 4 - Architecture decomposition | complete | docs/ARCHITECTURE.md | Yes | integration | - | Phase 5 - Guide trimming | pending | docs/ANNOTATION-GUIDE.md, docs/CONFIGURATION.md | No | n/a | - | Phase 6 - Index navigation update | pending | docs-live/INDEX.md, docs/INDEX.md | No | n/a | + | Phase 5 - Guide trimming | complete | docs/ANNOTATION-GUIDE.md, docs/CONFIGURATION.md | No | n/a | + | Phase 6 - Index navigation update | complete | docs-live/INDEX.md, docs/INDEX.md | No | n/a | | Phase 37 - docs-live/ directory consolidation | complete | delivery-process.config.ts | Yes | integration | - | Phase 38 - Generated doc quality improvements | pending | src/renderable/codecs/reference.ts | Yes | integration | + | Phase 38 - Generated doc quality improvements | complete | src/renderable/codecs/reference.ts | Yes | integration | | Phase 39 - Session workflow CLAUDE.md module generation | complete | delivery-process/specs/, _claude-md/workflow/ | No | n/a | | Phase 40 - PUBLISHING.md relocation to MAINTAINERS.md | complete | docs/PUBLISHING.md | No | n/a | | Phase 41 - GHERKIN-PATTERNS.md restructure | complete | docs/GHERKIN-PATTERNS.md, docs/VALIDATION.md | No | n/a | diff --git a/delivery-process/specs/mcp-server-integration.feature b/delivery-process/specs/mcp-server-integration.feature new file mode 100644 index 00000000..6a557fe6 --- /dev/null +++ b/delivery-process/specs/mcp-server-integration.feature @@ -0,0 +1,202 @@ +@libar-docs +@libar-docs-pattern:MCPServerIntegration +@libar-docs-status:roadmap +@libar-docs-phase:46 +@libar-docs-product-area:DataAPI +@libar-docs-effort:3d +@libar-docs-priority:high +@libar-docs-depends-on:DataAPICLIErgonomics +@libar-docs-see-also:DataAPIPlatformIntegration,DataAPICLIErgonomics +@libar-docs-business-value:native-claude-code-tool-integration-with-zero-subprocess-overhead +Feature: MCP Server Integration + + **Problem:** + Claude Code accesses ProcessStateAPI through subprocess calls to the process-api + CLI. Each invocation runs the full 8-step pipeline (config, scan, extract, merge, + hierarchy, workflow, transform, validate), taking 2-5 seconds. During a typical + session with 10-20 queries, this adds 30-90 seconds of pure pipeline overhead. + The subprocess model prevents stateful interaction -- there is no way to keep the + MasterDataset in memory between queries. + + **Solution:** + Implement an MCP (Model Context Protocol) server that wraps ProcessStateAPI: + 1. Load the pipeline ONCE and keep MasterDataset in memory + 2. Expose ProcessStateAPI methods and CLI subcommands as MCP tools + 3. Allow Claude Code to call them as native tools with sub-millisecond dispatch + 4. Optionally watch source files and rebuild the dataset on changes + + Background: Deliverables + Given the following deliverables: + | Deliverable | Status | Location | + | MCP server entry point and lifecycle | pending | src/mcp/server.ts | + | Tool registry with JSON Schema generation | pending | src/mcp/tool-registry.ts | + | Pipeline session manager | pending | src/mcp/pipeline-session.ts | + | File watcher with debounced rebuild | pending | src/mcp/file-watcher.ts | + | MCP server bin entry | pending | src/cli/mcp-server.ts | + | MCP configuration documentation | pending | docs/MCP-SETUP.md | + + Rule: MCP server starts via stdio transport and manages its own lifecycle + + **Invariant:** The MCP server communicates over stdio using JSON-RPC. It builds + the pipeline once during initialization, then enters a request-response loop. + No non-MCP output is written to stdout (no console.log, no pnpm banners). + + **Rationale:** MCP defines stdio as the standard transport for local tool + servers. Claude Code spawns the process and communicates over stdin/stdout pipes. + Any extraneous stdout output corrupts the JSON-RPC stream. Loading the pipeline + during initialization ensures the first tool call is fast. + + **Verified by:** Server starts and responds to initialize, + Server handles shutdown cleanly + + @acceptance-criteria @happy-path + Scenario: MCP server starts and responds to initialize + Given the MCP server is started with config auto-detection + When the client sends an MCP initialize request + Then the server responds with capabilities including tools + And the pipeline has been built with MasterDataset in memory + + @acceptance-criteria @happy-path + Scenario: Server handles shutdown cleanly + Given the MCP server is running with an active file watcher + When the client closes the connection + Then the file watcher is stopped + And the process exits with code 0 + + @acceptance-criteria @edge-case + Scenario: Server starts with explicit input globs + Given the MCP server is started with args "--input src/**/*.ts --features specs/**/*.feature" + When the client sends an MCP initialize request + Then the pipeline uses the explicit globs instead of config auto-detection + + Rule: ProcessStateAPI methods and CLI subcommands are registered as MCP tools + + **Invariant:** Every CLI subcommand is registered as an MCP tool with a JSON + Schema describing its input parameters. Tool names use snake_case with a "dp_" + prefix to avoid collisions with other MCP servers. + + **Rationale:** MCP tools are the unit of interaction. Each tool needs a name, + description (for LLM tool selection), and JSON Schema for input validation. + The "dp_" prefix prevents collisions in multi-server setups. + + **Verified by:** All CLI subcommands appear as MCP tools, + Tool schemas validate input parameters + + @acceptance-criteria @happy-path + Scenario: All CLI subcommands appear as MCP tools + Given the MCP server is initialized + When the client requests the tool list + Then at least 19 tools are registered + And each tool name starts with "dp_" + And each tool has a non-empty description + + @acceptance-criteria @happy-path + Scenario: Tool call executes successfully + Given the MCP server is initialized + When the client calls "dp_overview" + Then the response contains the overview text with progress and phases + + @acceptance-criteria @edge-case + Scenario: Tool call with missing required parameter returns error + Given the MCP server is initialized + When the client calls "dp_pattern" without the required "name" parameter + Then the response is an MCP error indicating invalid params + + Rule: MasterDataset is loaded once and reused across all tool invocations + + **Invariant:** The pipeline runs exactly once during server initialization. All + subsequent tool calls read from in-memory MasterDataset. A manual rebuild can + be triggered via a "dp_rebuild" tool. + + **Rationale:** The pipeline costs 2-5 seconds. Running it per tool call negates + MCP benefits. Pre-computed views provide O(1) access ideal for a query server. + + **Verified by:** Multiple tool calls share one pipeline build, + Rebuild refreshes the dataset + + @acceptance-criteria @happy-path + Scenario: Multiple tool calls share one pipeline build + Given the MCP server is initialized + When the client calls "dp_status" then "dp_list" then "dp_overview" + Then all three return results + And the pipeline was built exactly once + + @acceptance-criteria @happy-path + Scenario: Rebuild refreshes the dataset + Given the MCP server is running with a loaded dataset + When the client calls "dp_rebuild" + Then the pipeline runs again + And subsequent tool calls use the new dataset + + @acceptance-criteria @edge-case + Scenario: Concurrent reads during rebuild use previous dataset + Given a rebuild is in progress + When a tool call arrives for "dp_status" + Then the call uses the previous dataset + And the response metadata indicates rebuild in progress + + Rule: Source file changes trigger automatic dataset rebuild with debouncing + + **Invariant:** When --watch is enabled, changes to source files trigger an + automatic pipeline rebuild. Multiple rapid changes are debounced into a single + rebuild (default 500ms window). + + **Rationale:** During implementation sessions, source files change frequently. + Without auto-rebuild, agents must manually call dp_rebuild. Debouncing prevents + redundant rebuilds during rapid-fire saves. + + **Verified by:** File change triggers rebuild, + Rapid changes are debounced + + @acceptance-criteria @happy-path + Scenario: File change triggers rebuild + Given the MCP server is running with --watch enabled + When a TypeScript source file is modified + Then the pipeline rebuilds automatically + And subsequent tool calls reflect the updated source + + @acceptance-criteria @happy-path + Scenario: Rapid changes are debounced + Given the MCP server is running with --watch enabled + When 5 files are modified within 200ms + Then the pipeline rebuilds exactly once after the debounce window + + @acceptance-criteria @edge-case + Scenario: Rebuild failure during watch does not crash server + Given the MCP server is running with --watch enabled + When a source file change introduces a parse error + Then the server continues using the previous valid dataset + And an MCP notification indicates rebuild failure + + Rule: MCP server is configurable via standard client configuration + + **Invariant:** The server works with .mcp.json (Claude Code), claude_desktop_config.json + (Claude Desktop), and any MCP client. It accepts --input, --features, --base-dir + args and auto-detects delivery-process.config.ts. + + **Rationale:** MCP clients discover servers through configuration files. The + server must work with sensible defaults (config auto-detection) while supporting + explicit overrides for monorepo setups. + + **Verified by:** Default config auto-detection, + Server works when started via npx + + @acceptance-criteria @happy-path + Scenario: Default config auto-detection + Given a project with delivery-process.config.ts + When the MCP server is started without explicit arguments + Then it loads globs from the config file + And the pipeline builds successfully + + @acceptance-criteria @happy-path + Scenario: Server works when started via npx + Given the package is installed + When running "npx @libar-dev/delivery-process dp-mcp-server" + Then the server process starts and awaits MCP initialize + And no extraneous output appears on stdout + + @acceptance-criteria @edge-case + Scenario: No config file and no explicit globs + Given a directory without delivery-process.config.ts + When the MCP server is started without arguments + Then the server exits with a clear error message diff --git a/delivery-process/specs/monorepo-support.feature b/delivery-process/specs/monorepo-support.feature new file mode 100644 index 00000000..0baff322 --- /dev/null +++ b/delivery-process/specs/monorepo-support.feature @@ -0,0 +1,162 @@ +@libar-docs +@libar-docs-pattern:MonorepoSupport +@libar-docs-status:roadmap +@libar-docs-phase:100 +@libar-docs-product-area:Configuration +@libar-docs-effort:3d +@libar-docs-priority:low +@libar-docs-business-value:multi-package-config-and-scoped-queries-for-monorepo-consumers +Feature: Monorepo Cross-Package Support + + **Problem:** + The delivery-process package is consumed by a large monorepo (~600 files across + multiple packages), but the config system has no concept of "packages." The + consumer passes all source paths as repeated --input and --features CLI flags, + creating massive duplication across 15+ scripts. MasterDataset has no concept of + which package a pattern belongs to. There is no --package filter for scoping + queries, no cross-package dependency visibility, and no per-package coverage. + + **Solution:** + Extend config and pipeline with workspace-aware capabilities: + 1. Multi-package config mapping package names to source globs + 2. Package provenance derived from glob matching (not a new annotation tag) + 3. Package-scoped query filter composing with existing filters + 4. Cross-package dependency analysis aggregated from pattern relationships + 5. Per-package coverage reports + + Background: Deliverables + Given the following deliverables: + | Deliverable | Status | Location | + | PackageConfig type and Zod schema | pending | src/config/project-config.ts | + | Package-aware source resolver | pending | src/config/resolve-config.ts | + | Package provenance on ExtractedPattern | pending | src/validation-schemas/extracted-pattern.ts | + | Scanner package assignment | pending | src/scanner/pattern-scanner.ts | + | MasterDataset byPackage view | pending | src/generators/pipeline/transform-dataset.ts | + | CLI --package filter flag | pending | src/cli/output-pipeline.ts | + | Cross-package dependency subcommand | pending | src/api/cross-package.ts | + | Per-package coverage report | pending | src/api/coverage-analyzer.ts | + + Rule: Config supports workspace-aware package definitions + + **Invariant:** When a packages field is present in the config, each entry maps + a package name to its source globs. The top-level sources field becomes optional. + Packages without their own features or stubs inherit from top-level sources. + Repos without packages work exactly as before (backward compatible). + + **Rationale:** The consumer monorepo has no config file because the system only + supports flat glob arrays. Adding packages enables a single config file to + replace duplicated globs across 15+ scripts. + + **Verified by:** Multi-package config parsing, + Single-package backward compatibility + + @acceptance-criteria @happy-path + Scenario: Multi-package config is parsed and validated + Given a config file with two package entries + When the config is loaded and resolved + Then each package has resolved TypeScript and feature globs + And the total source set is the union of all package globs + + @acceptance-criteria @happy-path + Scenario: Single-package config works without packages field + Given a config file with sources but no packages field + When the config is loaded and resolved + Then resolution proceeds exactly as before + And no package provenance is assigned + + Rule: Extracted patterns carry package provenance from glob matching + + **Invariant:** When packages config is active, every ExtractedPattern has an + optional package field set from the matching glob. If no packages config exists, + the field is undefined. First match wins on overlapping globs. + + **Rationale:** Package provenance must be derived automatically from config, + not from manual annotation. This ensures zero additional developer effort. + + **Verified by:** Package derived from glob match, + No package when config lacks packages field + + @acceptance-criteria @happy-path + Scenario: Package field is set from matching glob + Given a multi-package config with "platform-core" and "platform-bc" + And a source file at "packages/platform-core/src/events.ts" + When the file is scanned and extracted + Then the resulting pattern has package "platform-core" + + @acceptance-criteria @edge-case + Scenario: Package field is undefined without packages config + Given a single-package config with no packages field + When a source file is scanned + Then the resulting pattern has no package field + + Rule: CLI commands accept a package filter that composes with existing filters + + **Invariant:** The --package flag filters patterns to those from a specific + package. It composes with --status, --phase, --category via logical AND. + + **Rationale:** In a 600-file monorepo, unscoped queries return too many results. + Package-scoped filtering lets developers focus on a single workspace member. + + **Verified by:** Package filter returns matching patterns, + Package filter composes with status filter + + @acceptance-criteria @happy-path + Scenario: Package filter returns only matching patterns + Given patterns from "platform-core" and "platform-bc" in the dataset + When running "process-api list --package platform-core" + Then only patterns with package "platform-core" are returned + + @acceptance-criteria @happy-path + Scenario: Package filter composes with status filter + Given active and roadmap patterns in both packages + When running "process-api list --package platform-core --status active" + Then only active patterns from "platform-core" are returned + + Rule: Cross-package dependencies are visible as a package-level graph + + **Invariant:** The cross-package subcommand aggregates pattern-level relationships + into package-level edges, showing source package, target package, and the patterns + forming the dependency. Intra-package dependencies are excluded. + + **Rationale:** Understanding cross-package dependencies is essential for release + planning and impact analysis. The relationship data already exists in + relationshipIndex -- this adds package-level aggregation. + + **Verified by:** Cross-package edges derived from pattern relationships, + Intra-package dependencies excluded + + @acceptance-criteria @happy-path + Scenario: Cross-package dependency view shows package edges + Given "OrderHandler" in "platform-bc" uses "EventStore" in "platform-core" + When running "process-api cross-package" + Then the output shows platform-bc depends on platform-core + + @acceptance-criteria @edge-case + Scenario: Intra-package dependencies are excluded + Given "Scanner" uses "ASTParser" and both are in "platform-core" + When running "process-api cross-package" + Then no self-referencing edge for platform-core appears + + Rule: Coverage analysis reports annotation completeness per package + + **Invariant:** When packages config is active, arch coverage reports per-package + annotation counts alongside the aggregate total. + + **Rationale:** Different packages have different annotation maturity. Per-package + breakdown lets teams track their own progress and identify which packages need + the most work. + + **Verified by:** Per-package coverage breakdown, + Single-package config shows flat report + + @acceptance-criteria @happy-path + Scenario: Coverage report includes per-package breakdown + Given a multi-package config with two packages + When running "process-api arch coverage" + Then the report shows per-package coverage with annotated counts and percentages + + @acceptance-criteria @edge-case + Scenario: Single-package config shows flat coverage report + Given a config with no packages field + When running "process-api arch coverage" + Then the report shows a single aggregate coverage number diff --git a/delivery-process/specs/setup-command.feature b/delivery-process/specs/setup-command.feature new file mode 100644 index 00000000..a30a4f16 --- /dev/null +++ b/delivery-process/specs/setup-command.feature @@ -0,0 +1,209 @@ +@libar-docs +@libar-docs-pattern:SetupCommand +@libar-docs-status:roadmap +@libar-docs-phase:45 +@libar-docs-product-area:Configuration +@libar-docs-effort:3d +@libar-docs-priority:high +@libar-docs-depends-on:ConfigLoader +@libar-docs-business-value:reduce-first-project-setup-from-55-minutes-to-under-2-minutes +Feature: Interactive Setup Command + + **Problem:** + Setting up a new project to use delivery-process requires 7 manual steps spanning + ~55 minutes: install the package plus dev dependencies, create tsconfig.json with + correct module settings, create delivery-process.config.ts with defineConfig() and + correct source globs, add 15+ npm scripts to package.json, create directory + structure, and annotate the first file with the correct opt-in marker and tags. + + Each step has failure modes: wrong module type (CommonJS instead of ESM), + incorrect moduleResolution (node instead of NodeNext), wrong glob patterns, + missing flags, typos in long dist paths. A single mistake produces silent failures + or cryptic errors that a new user cannot diagnose without reading the full tutorial. + + **Solution:** + Add an interactive setup CLI invoked via npx: + + npx @libar-dev/delivery-process init + + The command detects existing project context (package.json, TypeScript config, + monorepo markers), asks the user to select a preset, and generates all required + files and configuration in a single run. Non-interactive mode (--yes flag) uses + all defaults for CI and scripted adoption. + + Background: Deliverables + Given the following deliverables: + | Deliverable | Status | Location | + | Project context detector | pending | src/cli/init/detect-context.ts | + | Interactive prompt engine | pending | src/cli/init/prompts.ts | + | Config file generator | pending | src/cli/init/generate-config.ts | + | Package.json augmenter | pending | src/cli/init/augment-package-json.ts | + | Directory scaffolder | pending | src/cli/init/scaffold-dirs.ts | + | Example annotation generator | pending | src/cli/init/generate-example.ts | + | Setup validator | pending | src/cli/init/validate-setup.ts | + | Init CLI entry point | pending | src/cli/init.ts | + | Bin entry registration | pending | package.json | + + Rule: Init detects existing project context before making changes + + **Invariant:** The init command reads the target directory for package.json, + tsconfig.json, delivery-process.config.ts, and monorepo markers before prompting + or generating any files. Detection results determine which steps are skipped. + + **Rationale:** Blindly generating files overwrites user configuration and breaks + working setups. Context detection enables safe adoption into existing projects by + skipping steps that are already complete. + + **Verified by:** Detects existing package.json and skips creation, + Fails gracefully when run outside a project directory + + @acceptance-criteria @happy-path + Scenario: Detects existing package.json and adapts behavior + Given a directory with an existing package.json containing "type": "module" + And a tsconfig.json with moduleResolution "NodeNext" + When running the init command + Then the command does not prompt for package.json creation + And the command does not modify tsconfig.json + And the command proceeds to preset selection + + @acceptance-criteria @validation + Scenario: Fails gracefully when no package.json exists + Given an empty directory with no package.json + When running the init command + Then the command prints "No package.json found. Run npm init first." + And exits with code 1 + + Rule: Interactive prompts configure preset and source paths with smart defaults + + **Invariant:** The init command prompts for preset selection from the three + available presets (generic, libar-generic, ddd-es-cqrs) with descriptions, and + for source glob paths with defaults inferred from project structure. The --yes + flag skips all prompts and uses defaults. + + **Rationale:** New users do not know which preset to choose or what glob patterns + to use. Smart defaults reduce decisions to confirmations. The --yes flag enables + scripted adoption in CI. + + **Verified by:** Preset selection shows all three presets, + Non-interactive mode uses defaults without prompting + + @acceptance-criteria @happy-path + Scenario: Preset selection prompt shows all three presets + Given a project directory with package.json + When running the init command + Then the prompt displays three preset choices: + | Preset | Description | + | generic | Minimal categories with docs- prefix | + | libar-generic | Minimal categories with libar-docs- prefix | + | ddd-es-cqrs | Full 21-category DDD taxonomy | + And the default selection is "libar-generic" + + @acceptance-criteria @validation + Scenario: Non-interactive mode uses defaults without prompting + Given a project directory with package.json + When running the init command with --yes flag + Then no interactive prompts are displayed + And preset defaults to "libar-generic" + And source globs use sensible defaults + + Rule: Generated config file uses defineConfig with correct imports + + **Invariant:** The generated delivery-process.config.ts imports defineConfig + from the correct path, uses the selected preset, and includes configured source + globs. An existing config file is never overwritten without confirmation. + + **Rationale:** The config file is the most important artifact. An incorrect + import path or malformed glob causes every subsequent command to fail. The + overwrite guard prevents destroying custom configuration. + + **Verified by:** Generated config is valid TypeScript, + Existing config is not overwritten without confirmation + + @acceptance-criteria @happy-path + Scenario: Generated config file is valid TypeScript + Given the user selected preset "libar-generic" + And TypeScript source glob is "src/**/*.ts" + When the config file is generated + Then delivery-process.config.ts imports from "@libar-dev/delivery-process/config" + And contains defineConfig with the selected preset + And contains the configured source globs + + @acceptance-criteria @validation + Scenario: Existing config file is not overwritten without confirmation + Given a directory with an existing delivery-process.config.ts + When running the init command + Then the command prompts for overwrite confirmation + And answering "no" preserves the existing file + + Rule: Npm scripts are injected using bin command names + + **Invariant:** Injected scripts reference bin names (process-api, generate-docs) + resolved via node_modules/.bin, not dist paths. Existing scripts are preserved. + The package.json "type" field is set to "module" if not already present. + + **Rationale:** The tutorial uses long fragile dist paths. Bin commands are the + stable public API. Setting type:module ensures ESM imports work for the config. + + **Verified by:** Injected scripts use bin names, + Existing scripts are preserved + + @acceptance-criteria @happy-path + Scenario: Injected scripts use bin command names + Given a package.json with no delivery-process scripts + When the init command injects scripts + Then package.json contains process:query using "process-api" + And contains docs:all using "generate-docs" + And contains "type" set to "module" + + @acceptance-criteria @validation + Scenario: Existing scripts in package.json are preserved + Given a package.json with existing "build" and "test" scripts + When the init command injects scripts + Then existing scripts are unchanged + And new process and docs scripts are added alongside them + + Rule: Directory structure and example annotation enable immediate first run + + **Invariant:** The init command creates directories for configured source globs + and generates one example annotated TypeScript file with the minimum annotation + set (opt-in marker, pattern tag, status, category, description). + + **Rationale:** Empty source globs produce a confusing "0 patterns" result. An + example file proves the pipeline works and teaches annotation syntax by example. + + **Verified by:** Directories created for configured globs, + Example file is detected by the scanner + + @acceptance-criteria @happy-path + Scenario: Example annotation file is detected by the pipeline + Given the init command generated an example annotated file + When running process-api overview + Then the output shows 1 pattern detected + + Rule: Init validates the complete setup by running the pipeline + + **Invariant:** After all files are generated, init runs process-api overview and + reports whether the pipeline detected the example pattern. Success prints a + summary and next steps. Failure prints diagnostic information. + + **Rationale:** Generating files without verification produces false confidence. + Running the pipeline as the final step proves config, globs, directories, and + the example annotation all work together. + + **Verified by:** Successful setup prints summary, + Failed validation prints diagnostics + + @acceptance-criteria @happy-path + Scenario: Successful setup prints summary and next steps + Given all init steps completed without errors + When the validation step detects 1 pattern + Then the command prints a setup summary with config file and preset + And prints next steps for annotating files and generating docs + And exits with code 0 + + @acceptance-criteria @validation + Scenario: Failed validation prints diagnostic information + Given init completed but the example file has an invalid glob match + When the validation step detects 0 patterns + Then the command prints a diagnostic message about source glob configuration + And exits with code 1 diff --git a/delivery-process/specs/step-lint-extended-rules.feature b/delivery-process/specs/step-lint-extended-rules.feature index 67b90a6a..ad4af35d 100644 --- a/delivery-process/specs/step-lint-extended-rules.feature +++ b/delivery-process/specs/step-lint-extended-rules.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:StepLintExtendedRules -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:51 @libar-docs-effort:1d @libar-docs-depends-on:StepLintVitestCucumber @@ -35,12 +36,12 @@ Feature: Step Lint Extended Rules - Additional vitest-cucumber Traps Background: Deliverables Given the following deliverables: | Deliverable | Status | Tests | Location | - | Hash-in-step-text check | pending | Yes | src/lint/steps/feature-checks.ts | - | Keyword-in-description check | pending | Yes | src/lint/steps/feature-checks.ts | - | Outline-quoted-values check | pending | Yes | src/lint/steps/cross-checks.ts | - | Repeated-step-pattern check | pending | Yes | src/lint/steps/step-checks.ts | - | Rule definitions for 4 new rules | pending | No | src/lint/steps/types.ts | - | Gherkin executable specs | pending | Yes | tests/features/lint/step-lint-extended.feature | + | Hash-in-step-text check | complete | Yes | src/lint/steps/feature-checks.ts | + | Keyword-in-description check | complete | Yes | src/lint/steps/feature-checks.ts | + | Outline-quoted-values check | complete | Yes | src/lint/steps/cross-checks.ts | + | Repeated-step-pattern check | complete | Yes | src/lint/steps/step-checks.ts | + | Rule definitions for 4 new rules | complete | No | src/lint/steps/types.ts | + | Gherkin executable specs | complete | Yes | tests/features/lint/step-lint-extended.feature | Rule: Hash in step text is detected diff --git a/delivery-process/specs/traceability-enhancements.feature b/delivery-process/specs/traceability-enhancements.feature index 4ea6983c..f65da277 100644 --- a/delivery-process/specs/traceability-enhancements.feature +++ b/delivery-process/specs/traceability-enhancements.feature @@ -7,6 +7,7 @@ @libar-docs-product-area:Generation @libar-docs-business-value:detect-coverage-gaps-and-requirements-drift @libar-docs-priority:medium +@libar-docs-depends-on:TraceabilityGenerator Feature: Traceability Enhancements - Requirements ↔ Tests Loop **Problem:** diff --git a/delivery-process/specs/traceability-generator.feature b/delivery-process/specs/traceability-generator.feature index 0760df47..88626d55 100644 --- a/delivery-process/specs/traceability-generator.feature +++ b/delivery-process/specs/traceability-generator.feature @@ -4,135 +4,128 @@ @libar-docs-phase:18 @libar-docs-effort:2d @libar-docs-product-area:Generation -Feature: Traceability Generator - Map Rules to Scenarios - - **Business Value:** Provide audit-ready traceability matrices that demonstrate - test coverage for business rules without manual documentation. - - **How It Works:** - - Parse `**Verified by:**` annotations in Rule descriptions - - Match scenario names to actual scenarios in feature files - - Generate traceability matrix showing Rule → Scenario mappings - - Report coverage gaps (rules without scenarios, orphan scenarios) - - **Why It Matters:** - | Benefit | How | - | Audit compliance | Demonstrates which tests verify which business rules | - | Coverage visibility | Identifies rules without verification scenarios | - | Orphan detection | Finds scenarios not linked to any rule | - | Impact analysis | Shows which scenarios to run when a rule changes | +Feature: Traceability Generator - Rule-to-Scenario Coverage via Codec + + **Problem:** + The existing TraceabilityCodec in `src/renderable/codecs/reporting.ts` only checks + timeline-to-behavior file coverage (does a pattern have an associated .feature file?). + It does NOT cross-reference `**Verified by:**` annotations against actual scenario names. + This means rules can claim verification by scenarios that do not exist, and orphan + scenarios (not referenced by any rule) go undetected. + + **Solution:** + Extend the existing TraceabilityCodec (ADR-005 codec architecture) to add + Rule-to-Scenario traceability. The `parseBusinessRuleAnnotations()` helper in + `src/renderable/codecs/helpers.ts` already extracts `verifiedBy` strings from Rule + descriptions. The remaining work is: + 1. Cross-reference those strings against actual scenario names in MasterDataset + 2. Build a traceability matrix section showing Rule-to-Scenario mappings + 3. Detect coverage gaps (unverified rules, orphan scenarios, dangling references) + 4. Wire the codec output into `docs:all` via config and npm script + + **Architecture:** + | Component | Location | Status | + | Annotation parser | `src/renderable/codecs/helpers.ts` parseBusinessRuleAnnotations() | Exists | + | TraceabilityCodec | `src/renderable/codecs/reporting.ts` | Exists (timeline coverage only) | + | Codec registry | `src/renderable/generate.ts` | Registered as 'traceability' | + | Config wiring | `delivery-process.config.ts` | NOT wired | + | npm script | `package.json` docs:traceability | NOT wired | Background: Deliverables Given the following deliverables: | Deliverable | Status | Location | Tests | Test Type | - | Traceability extractor | pending | @libar-dev/delivery-process/src/generators/traceability/ | Yes | unit | - | Traceability matrix renderer | pending | @libar-dev/delivery-process/src/generators/traceability/ | Yes | unit | - | CLI integration | pending | @libar-dev/delivery-process/src/cli/generate-docs.ts | Yes | unit | - | docs:traceability script | pending | package.json | No | - | + | Traceability extractor | superseded | src/renderable/codecs/helpers.ts | Yes | unit | + | CLI integration | complete | src/renderable/generate.ts | Yes | unit | + | Rule-to-Scenario cross-reference | pending | src/renderable/codecs/reporting.ts | Yes | unit | + | Coverage gap detection | pending | src/renderable/codecs/reporting.ts | Yes | unit | + | Config pipeline wiring | pending | delivery-process.config.ts | No | - | + | docs:traceability npm script | pending | package.json | No | - | # =========================================================================== - # RULE 1: Parse Verified by annotations + # RULE 1: Rule-to-Scenario traceability matrix # =========================================================================== - Rule: Parses Verified by annotations to extract scenario references + Rule: Cross-references Verified by annotations against actual scenarios - **Invariant:** Scenario names in `**Verified by:**` are matched against actual - scenarios in feature files. Unmatched references are reported as warnings. + **Invariant:** Every `verifiedBy` string extracted from a Rule description is + matched against scenario names in the MasterDataset. The traceability matrix + shows each Rule with its verification status: verified (all references resolve), + partially verified (some resolve), or unverified (none resolve or no annotation). - **Rationale:** Verified by annotations create explicit traceability. Validating - references ensures the traceability matrix reflects actual test coverage. + **Rationale:** `parseBusinessRuleAnnotations()` already extracts `verifiedBy` + arrays from Rule descriptions. Without cross-referencing against actual scenario + names, the traceability report cannot distinguish between claimed and actual + test coverage. A dangling reference (scenario name that does not exist) is worse + than no annotation because it creates false confidence. - **Verified by:** Parses comma-separated scenarios, Reports unmatched references + **Verified by:** Cross-references verified-by against scenarios, Reports dangling references, Shows verification status per rule @acceptance-criteria @happy-path - Scenario: Parses comma-separated scenario list + Scenario: Cross-references verified-by against scenarios Given a Rule with Verified by annotation: """gherkin Rule: Reservations prevent race conditions - **Verified by:** Concurrent reservations, Expired reservation cleanup, User cancels + **Verified by:** Concurrent reservations, Expired reservation cleanup """ - When the traceability generator parses the Rule - Then it should extract 3 scenario references: - | Scenario Reference | + And the MasterDataset contains scenarios: + | Scenario Name | | Concurrent reservations | | Expired reservation cleanup | - | User cancels | + When the TraceabilityCodec decodes the dataset + Then the matrix should show the Rule as "verified" with 2 matched scenarios @acceptance-criteria @validation - Scenario: Reports unmatched scenario references - Given a Rule references scenario "Non-existent test" - And no scenario with that name exists in any feature file - When the traceability generator runs - Then a warning should be generated for "Non-existent test" - And the matrix should mark it as "unverified" - - # =========================================================================== - # RULE 2: Generate traceability matrix - # =========================================================================== - - Rule: Generates Rule-to-Scenario traceability matrix - - **Invariant:** Every Rule appears in the matrix with its verification status. - Scenarios are linked by name and file location. - - **Rationale:** A matrix format enables quick scanning of coverage status and - supports audit requirements for bidirectional traceability. - - **Verified by:** Matrix includes all rules, Matrix shows verification status - - @acceptance-criteria @happy-path - Scenario: Matrix includes all rules from feature files - Given feature files with Rules: - | Feature | Rule | - | reservation-pattern.feature | Reservations prevent race conditions | - | reservation-pattern.feature | TTL enables auto-cleanup | - | event-store.feature | Events are immutable | - When the traceability generator runs - Then the matrix should include 3 rows for each Rule + Scenario: Reports dangling references + Given a Rule references scenario "Non-existent test" in Verified by + And no scenario with that name exists in the MasterDataset + When the TraceabilityCodec decodes the dataset + Then a "Dangling References" section should list "Non-existent test" + And the Rule should show status "partially verified" or "unverified" @acceptance-criteria @happy-path - Scenario: Matrix shows verification status with scenario count - Given a Rule "Reservations prevent race conditions" with Verified by: - | Scenario | - | Concurrent reservations | - | Expired reservation cleanup | - When the traceability generator generates the matrix - Then the Rule row should show "2 scenarios" - And the Rule row should show status "verified" - - @acceptance-criteria @validation - Scenario: Matrix marks unverified rules - Given a Rule without Verified by annotation - When the traceability generator generates the matrix - Then the Rule row should show "0 scenarios" - And the Rule row should show status "unverified" + Scenario: Shows verification status per rule + Given Rules with varying coverage: + | Rule | Verified by scenarios | Matched | + | Rule A | Scenario X, Scenario Y | both exist | + | Rule B | Scenario Z | does not exist | + | Rule C | (none) | no annotation | + When the TraceabilityCodec decodes the dataset + Then the matrix should show: + | Rule | Status | + | Rule A | verified | + | Rule B | unverified | + | Rule C | unverified | # =========================================================================== - # RULE 3: Detect coverage gaps + # RULE 2: Coverage gap detection # =========================================================================== - Rule: Detects and reports coverage gaps + Rule: Detects orphan scenarios and unverified rules - **Invariant:** Orphan scenarios (not referenced by any Rule) and unverified - rules are listed in dedicated sections. + **Invariant:** Orphan scenarios (acceptance-criteria scenarios not referenced by + any Rule's Verified by annotation) and unverified rules (Rules without a Verified + by annotation or with zero matched scenarios) are listed in dedicated sections of + the traceability output. - **Rationale:** Coverage gaps indicate either missing traceability annotations - or actual missing test coverage. Surfacing them enables remediation. + **Rationale:** Coverage gaps indicate either missing traceability annotations or + actual missing test coverage. Orphan scenarios may be valuable tests that lack + traceability links, or dead tests that should be removed. Unverified rules are + business constraints with no demonstrated test coverage. **Verified by:** Reports orphan scenarios, Reports unverified rules @acceptance-criteria @happy-path Scenario: Reports orphan scenarios not linked to any rule - Given scenarios exist: - | Scenario | Referenced by Rule | + Given acceptance-criteria scenarios exist: + | Scenario | Referenced by any Rule | | Concurrent reservations | Yes | | Random utility test | No | | Internal helper scenario | No | - When the traceability generator runs - Then output should include "Orphan Scenarios" section - And section should list "Random utility test" - And section should list "Internal helper scenario" - And section should NOT list "Concurrent reservations" + When the TraceabilityCodec decodes the dataset + Then output should include an "Orphan Scenarios" section + And the section should list "Random utility test" + And the section should list "Internal helper scenario" + And the section should NOT list "Concurrent reservations" @acceptance-criteria @happy-path Scenario: Reports unverified rules @@ -140,32 +133,40 @@ Feature: Traceability Generator - Map Rules to Scenarios | Rule | Has Verified by | | Reservations prevent race conditions | Yes | | Legacy rule without annotation | No | - When the traceability generator runs - Then output should include "Unverified Rules" section - And section should list "Legacy rule without annotation" + When the TraceabilityCodec decodes the dataset + Then output should include an "Unverified Rules" section + And the section should list "Legacy rule without annotation" + And the section should NOT list "Reservations prevent race conditions" # =========================================================================== - # RULE 4: Support filtering and output formats + # RULE 3: Traceability output wired into production pipeline # =========================================================================== - Rule: Supports filtering by phase and domain + Rule: Traceability output is wired into the docs pipeline - **Invariant:** CLI flags allow filtering the matrix by phase number or domain - category to generate focused traceability reports. + **Invariant:** The TraceabilityCodec output is generated as part of `pnpm docs:all` + via a `docs:traceability` npm script backed by a ReferenceDocConfig entry in + `delivery-process.config.ts`. The output file lands in `docs-live/TRACEABILITY.md`. - **Rationale:** Large codebases have many rules. Filtering enables relevant - subset extraction for specific audits or reviews. + **Rationale:** The TraceabilityCodec is registered in the CodecRegistry but not + wired into `delivery-process.config.ts` or `package.json`. Without config wiring, + the codec is only usable programmatically or via tests. Adding it to the docs + pipeline makes traceability output a first-class generated artifact alongside + CHANGELOG.md, OVERVIEW.md, and other reporting codecs. - **Verified by:** Filters by phase, Filters by domain + **Verified by:** Config entry generates traceability output, npm script runs codec @acceptance-criteria @happy-path - Scenario: Filters matrix by phase - Given Rules from phases 15, 16, and 20 - When running `pnpm docs:traceability --phase 16` - Then matrix should only include Phase 16 rules + Scenario: Config entry generates traceability output + Given a ReferenceDocConfig entry for traceability in delivery-process.config.ts + When running `pnpm docs:traceability` + Then `docs-live/TRACEABILITY.md` should be generated + And it should contain a "Coverage Statistics" section + And it should contain a "Rule-to-Scenario Traceability" section @acceptance-criteria @happy-path - Scenario: Filters matrix by domain category - Given Rules with domain tags @libar-docs-ddd and @libar-docs-cqrs - When running `pnpm docs:traceability --domain ddd` - Then matrix should only include rules from DDD-tagged features + Scenario: npm script runs codec via generate-docs CLI + Given `package.json` has script `docs:traceability` + And `docs:all` includes `pnpm docs:traceability` + When running `pnpm docs:all` + Then traceability output is generated alongside other docs diff --git a/docs/INDEX.md b/docs/INDEX.md index 8a67f680..92617102 100644 --- a/docs/INDEX.md +++ b/docs/INDEX.md @@ -29,7 +29,7 @@ | Validate annotation quality | [VALIDATION.md](./VALIDATION.md) | 1-281 | | Query process state via CLI | [PROCESS-API.md](./PROCESS-API.md) | 1-507 | | Understand the taxonomy | [TAXONOMY.md](./TAXONOMY.md) | 1-105 | -| Publish to npm | [PUBLISHING.md](./PUBLISHING.md) | 1-144 | +| Publish to npm | [MAINTAINERS.md](../MAINTAINERS.md) | — | | Learn annotation patterns | [ANNOTATION-GUIDE.md](./ANNOTATION-GUIDE.md) | 1-268 | | Review the changelog | [CHANGELOG.md](../CHANGELOG.md) | 1-26 | | Security policy | [SECURITY.md](../SECURITY.md) | 1-21 | @@ -257,18 +257,9 @@ --- -### PUBLISHING.md (Lines 1-144) +### MAINTAINERS.md (repo root) -| Section | Lines | Key Topics | -| ----------------------------- | ------- | ------------------------------------ | -| Prerequisites | 5-9 | npm account, login, tests | -| Version Strategy | 11-18 | Semantic versioning, pre/latest tags | -| Publishing Workflow | 20-67 | Pre-releases, subsequent, stable | -| Automated Publishing | 69-85 | GitHub Actions, provenance | -| Pre-commit and Pre-push Hooks | 87-99 | Husky hooks, lint-staged, typecheck | -| Dry Run | 101-109 | Test before publishing | -| Verifying a Published Package | 111-126 | npm view, test install | -| Troubleshooting | 128-144 | Auth errors, package not found | +Publishing and maintainer workflows have moved to [MAINTAINERS.md](../MAINTAINERS.md) at the repository root. --- @@ -335,7 +326,7 @@ pnpm process:query -- handoff --pattern MyPattern # Capture session | VALIDATION.md | CI/CD | Quality — automated checks | | TAXONOMY.md | Reference | Lookup — tag taxonomy and API | | ANNOTATION-GUIDE.md | Developers | Reference — annotation mechanics | -| PUBLISHING.md | Maintainers | Release — npm publishing | +| MAINTAINERS.md | Maintainers | Release — npm publishing | | CHANGELOG.md | Everyone | Version history and changes | | SECURITY.md | Everyone | Security policy and reporting | diff --git a/src/cli/lint-steps.ts b/src/cli/lint-steps.ts index 15273ef8..f19f003e 100644 --- a/src/cli/lint-steps.ts +++ b/src/cli/lint-steps.ts @@ -131,6 +131,10 @@ Rules: error scenario-outline-function-params Function params in ScenarioOutline error missing-and-destructuring And steps but no And destructured error missing-rule-wrapper Rule: blocks but no Rule() wrapper + warning hash-in-step-text Mid-line # in step text silently truncates + error keyword-in-description Description line starts with Given/When/Then + warning outline-quoted-values Quoted values in Outline suggest wrong pattern + error repeated-step-pattern Same step pattern registered twice in scenario Examples: # Standard check diff --git a/src/cli/process-api.ts b/src/cli/process-api.ts index 92caa9f1..4e118d51 100644 --- a/src/cli/process-api.ts +++ b/src/cli/process-api.ts @@ -52,6 +52,15 @@ import { handleCliError } from './error-handler.js'; import { printVersionAndExit } from './version.js'; import { CLI_SCHEMA } from './cli-schema.js'; import type { CLIOptionGroup } from './cli-schema.js'; +import { + isValidTransition as fsmIsValidTransition, + getValidTransitionsFrom as fsmGetValidTransitionsFrom, +} from '../validation/fsm/transitions.js'; +import { + validateTransition as fsmValidateTransition, + getProtectionSummary as fsmGetProtectionSummary, +} from '../validation/fsm/validator.js'; +import type { ProcessStatusValue } from '../taxonomy/index.js'; import { fuzzyMatchPatterns } from '../api/fuzzy-match.js'; import { allPatternNames, @@ -491,6 +500,104 @@ async function buildPipeline(config: ProcessAPICLIConfig): Promise` matches one of these, we dispatch directly to the FSM + * module, saving the 2-5 second pipeline build. + */ +const FSM_SHORT_CIRCUIT_METHODS: ReadonlySet = new Set([ + 'isValidTransition', + 'checkTransition', + 'getValidTransitionsFrom', + 'getProtectionInfo', +]); + +/** + * Attempt to handle an FSM query without building the pipeline. + * + * @returns The FSM result data if this is a short-circuit candidate, or + * `undefined` if the query should go through the normal pipeline path. + */ +function tryFsmShortCircuit(subcommand: string, subArgs: readonly string[]): unknown { + if (subcommand !== 'query') return undefined; + + const methodName = subArgs[0]; + if (methodName === undefined || !FSM_SHORT_CIRCUIT_METHODS.has(methodName)) { + return undefined; + } + + const fsmArgs = subArgs.slice(1); + + switch (methodName) { + case 'isValidTransition': { + const from = fsmArgs[0]; + const to = fsmArgs[1]; + if (from === undefined || to === undefined) { + throw new QueryApiError( + 'INVALID_ARGUMENT', + 'Usage: process-api query isValidTransition ' + ); + } + return fsmIsValidTransition(from as ProcessStatusValue, to as ProcessStatusValue); + } + + case 'checkTransition': { + const from = fsmArgs[0]; + const to = fsmArgs[1]; + if (from === undefined || to === undefined) { + throw new QueryApiError( + 'INVALID_ARGUMENT', + 'Usage: process-api query checkTransition ' + ); + } + const result = fsmValidateTransition(from, to); + return { + from: result.from, + to: result.to, + valid: result.valid, + error: result.error, + validAlternatives: result.validAlternatives, + }; + } + + case 'getValidTransitionsFrom': { + const status = fsmArgs[0]; + if (status === undefined) { + throw new QueryApiError( + 'INVALID_ARGUMENT', + 'Usage: process-api query getValidTransitionsFrom ' + ); + } + return fsmGetValidTransitionsFrom(status as ProcessStatusValue); + } + + case 'getProtectionInfo': { + const status = fsmArgs[0]; + if (status === undefined) { + throw new QueryApiError( + 'INVALID_ARGUMENT', + 'Usage: process-api query getProtectionInfo ' + ); + } + const summary = fsmGetProtectionSummary(status as ProcessStatusValue); + return { + status, + level: summary.level, + description: summary.description, + canAddDeliverables: summary.canAddDeliverables, + requiresUnlock: summary.requiresUnlock, + }; + } + + default: + return undefined; + } +} + // ============================================================================= // Subcommand Handlers // ============================================================================= @@ -1408,6 +1515,17 @@ async function main(): Promise { // Validate output modifiers before any expensive work validateModifiers(opts.modifiers); + // FSM short-circuit: bypass pipeline for static FSM queries (2-5s saving) + if (opts.subcommand === 'query') { + const fsmResult = tryFsmShortCircuit(opts.subcommand, opts.subArgs); + if (fsmResult !== undefined) { + const envelope = createSuccess(fsmResult, 0); + const output = formatOutput(envelope, opts.format); + console.log(output); + return; + } + } + // Resolve config file defaults if --input and --features not provided await applyConfigDefaults(opts); diff --git a/tests/features/behavior/kebab-case-slugs.feature b/tests/features/behavior/kebab-case-slugs.feature index 486862fc..ebd0295a 100644 --- a/tests/features/behavior/kebab-case-slugs.feature +++ b/tests/features/behavior/kebab-case-slugs.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:KebabCaseSlugs -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:44 @libar-docs-product-area:CoreTypes @libar-docs-include:core-types @@ -18,12 +19,20 @@ Feature: Slug Generation for Progressive Disclosure - Special characters removal - Proper phase prefixing for requirements + Background: Deliverables + Given the following deliverables: + | Deliverable | Status | Location | + | toKebabCase utility | complete | src/utils/string-utils.ts | + | requirementToSlug function | complete | src/renderable/codecs/requirements.ts | + | getPhaseSlug function | complete | src/renderable/codecs/timeline.ts | + Rule: CamelCase names convert to kebab-case **Invariant:** CamelCase pattern names must be split at word boundaries and joined with hyphens in lowercase. **Rationale:** Generated file names and URL fragments must be human-readable and URL-safe; unsplit CamelCase produces opaque slugs that are difficult to scan in directory listings. **Verified by:** Convert pattern names to readable slugs + @acceptance-criteria Scenario Outline: Convert pattern names to readable slugs Given pattern name "" When converting to kebab-case slug @@ -48,6 +57,7 @@ Feature: Slug Generation for Progressive Disclosure **Rationale:** Unhandled edge cases produce malformed file names (double hyphens, leading dashes) that break cross-platform path resolution and make generated links inconsistent. **Verified by:** Handle edge cases in slug generation + @acceptance-criteria Scenario Outline: Handle edge cases in slug generation Given pattern name "" When converting to kebab-case slug @@ -67,6 +77,7 @@ Feature: Slug Generation for Progressive Disclosure **Rationale:** Phase prefixes enable lexicographic sorting of requirement files by delivery order, so directory listings naturally reflect the roadmap sequence. **Verified by:** Requirement slugs include phase number, Requirement without phase uses phase 00 + @acceptance-criteria Scenario Outline: Requirement slugs include phase number Given pattern "" with phase "" When generating requirement slug @@ -90,6 +101,7 @@ Feature: Slug Generation for Progressive Disclosure **Rationale:** A consistent "phase-NN-name" format ensures phase files sort numerically and remain identifiable even when the phase number alone would be ambiguous across roadmap versions. **Verified by:** Phase slugs combine number and kebab-case name, Phase without name uses "unnamed" + @acceptance-criteria Scenario Outline: Phase slugs combine number and kebab-case name Given phase number "" with name "" When generating phase slug diff --git a/tests/features/behavior/rich-content-helpers.feature b/tests/features/behavior/rich-content-helpers.feature index 8fd46845..fa2888fc 100644 --- a/tests/features/behavior/rich-content-helpers.feature +++ b/tests/features/behavior/rich-content-helpers.feature @@ -1,9 +1,11 @@ @libar-docs @libar-docs-pattern:RichContentHelpersTesting @libar-docs-implements:RichContentHelpers -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Retroactive-completion @libar-docs-phase:44 @libar-docs-product-area:Generation +@behavior Feature: Rich Content Rendering Helpers As a document codec author @@ -16,6 +18,13 @@ Feature: Rich Content Rendering Helpers - Empty inputs (graceful handling) - Missing table cells (empty string fallback) + Background: Deliverables + Given the following deliverables: + | Deliverable | Status | Location | + | parseDescriptionWithDocStrings helper | complete | src/renderable/codecs/helpers.ts | + | renderDataTable helper | complete | src/renderable/codecs/helpers.ts | + | renderScenarioContent helper | complete | src/renderable/codecs/helpers.ts | + Rule: DocString parsing handles edge cases **Invariant:** DocString parsing must gracefully handle empty input, missing language hints, unclosed delimiters, and non-LF line endings without throwing errors. @@ -33,6 +42,7 @@ Feature: Rich Content Rendering Helpers Then the result contains 1 block And block 1 is a paragraph with text "This is plain text without any code blocks." + @acceptance-criteria Scenario: Single DocString parses correctly Given a description with embedded DocString containing typescript code When parsing for DocStrings @@ -70,6 +80,7 @@ Feature: Rich Content Rendering Helpers When rendering the DataTable Then the output is a table block with 1 row + @acceptance-criteria Scenario: Multi-row DataTable renders correctly Given a DataTable with headers "A" and "B" and "C" And rows: @@ -91,6 +102,7 @@ Feature: Rich Content Rendering Helpers **Rationale:** Ignoring the includeSteps option would bloat summary views with unwanted detail, and dropping embedded DataTables would lose structured test data. **Verified by:** Render scenario with steps, Skip steps when includeSteps is false, Render scenario with DataTable in step + @acceptance-criteria Scenario: Render scenario with steps Given a scenario "Test Scenario" with steps: | keyword | text | @@ -118,6 +130,7 @@ Feature: Rich Content Rendering Helpers **Rationale:** Omitting the rule name makes rendered output unnavigable, and skipping DocString parsing would output raw delimiter syntax instead of formatted code blocks. **Verified by:** Rule with simple description, Rule with no description, Rule with embedded DocString in description + @acceptance-criteria Scenario: Rule with simple description Given a business rule "Must validate input" with description "Ensures all input is validated." When rendering the business rule @@ -146,6 +159,7 @@ Feature: Rich Content Rendering Helpers **Rationale:** Without dedentation, code blocks inherit the Gherkin indentation level, rendering as deeply indented and unreadable in generated markdown. **Verified by:** Code block preserves internal relative indentation, Empty lines in code block are preserved, Trailing whitespace is trimmed from each line, Code with mixed indentation is preserved + @acceptance-criteria Scenario: Code block preserves internal relative indentation Given a description with DocString containing nested code When parsing for DocStrings diff --git a/tests/steps/behavior/kebab-case-slugs.steps.ts b/tests/steps/behavior/kebab-case-slugs.steps.ts index c2a2ae33..a47ff11b 100644 --- a/tests/steps/behavior/kebab-case-slugs.steps.ts +++ b/tests/steps/behavior/kebab-case-slugs.steps.ts @@ -48,11 +48,17 @@ function initState(): SlugTestState { const feature = await loadFeature('tests/features/behavior/kebab-case-slugs.feature'); -describeFeature(feature, ({ Rule, AfterEachScenario }) => { +describeFeature(feature, ({ Rule, Background, AfterEachScenario }) => { AfterEachScenario(() => { state = null; }); + Background(({ Given }) => { + Given('the following deliverables:', () => { + // Background deliverables table is for spec documentation only + }); + }); + // =========================================================================== // Rule: CamelCase names convert to kebab-case // =========================================================================== diff --git a/tests/steps/behavior/rich-content-helpers.steps.ts b/tests/steps/behavior/rich-content-helpers.steps.ts index a67d8707..14d9634e 100644 --- a/tests/steps/behavior/rich-content-helpers.steps.ts +++ b/tests/steps/behavior/rich-content-helpers.steps.ts @@ -57,7 +57,13 @@ function initState(): RichContentTestState { const feature = await loadFeature('tests/features/behavior/rich-content-helpers.feature'); -describeFeature(feature, ({ Rule, AfterEachScenario }) => { +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + Background(({ Given }) => { + Given('the following deliverables:', () => { + // Background deliverables table is for spec documentation only + }); + }); + AfterEachScenario(() => { state = null; }); From 6019788284568f37615c2bd171fba7d0acb45fbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Darko=20Mijic=CC=81?= Date: Fri, 6 Mar 2026 14:28:53 +0100 Subject: [PATCH 3/3] =?UTF-8?q?fix:=20address=20PR=20#33=20review=20commen?= =?UTF-8?q?ts=20=E2=80=94=20tighten=20unlock=20bypass,=20validate=20FSM=20?= =?UTF-8?q?input?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Scope unlock-reason FSM bypass to validated reasons targeting completed only (was unconditionally bypassing on raw presence bit from detect-changes) - Add parseProcessStatus() to validate CLI input before FSM short-circuit calls (invalid status values previously caused runtime TypeError crashes) - Expand keyword-in-description help text to include And/But keywords - Clarify monorepo config inheritance wording and add overlapping globs scenario - Detect .config.js alongside .config.ts in setup-command spec - Resolve --yes vs overwrite conflict with --force flag for destructive ops - Make type:module opt-in via --esm flag instead of default mutation --- .../specs/monorepo-support.feature | 13 +++- delivery-process/specs/setup-command.feature | 27 +++++--- src/cli/lint-steps.ts | 2 +- src/cli/process-api.ts | 64 +++++++++++-------- src/lint/process-guard/decider.ts | 9 ++- 5 files changed, 77 insertions(+), 38 deletions(-) diff --git a/delivery-process/specs/monorepo-support.feature b/delivery-process/specs/monorepo-support.feature index 0baff322..a7836aa8 100644 --- a/delivery-process/specs/monorepo-support.feature +++ b/delivery-process/specs/monorepo-support.feature @@ -40,7 +40,8 @@ Feature: Monorepo Cross-Package Support **Invariant:** When a packages field is present in the config, each entry maps a package name to its source globs. The top-level sources field becomes optional. - Packages without their own features or stubs inherit from top-level sources. + Packages without their own feature or stub globs inherit those values from the + corresponding top-level feature and stub settings. Repos without packages work exactly as before (backward compatible). **Rationale:** The consumer monorepo has no config file because the system only @@ -74,7 +75,8 @@ Feature: Monorepo Cross-Package Support not from manual annotation. This ensures zero additional developer effort. **Verified by:** Package derived from glob match, - No package when config lacks packages field + No package when config lacks packages field, + First matching package wins when globs overlap @acceptance-criteria @happy-path Scenario: Package field is set from matching glob @@ -89,6 +91,13 @@ Feature: Monorepo Cross-Package Support When a source file is scanned Then the resulting pattern has no package field + @acceptance-criteria @edge-case + Scenario: First matching package wins when globs overlap + Given a multi-package config where "platform-core" and "platform-shared" both match the same source file + And "platform-core" is defined before "platform-shared" + When the file is scanned and extracted + Then the resulting pattern has package "platform-core" + Rule: CLI commands accept a package filter that composes with existing filters **Invariant:** The --package flag filters patterns to those from a specific diff --git a/delivery-process/specs/setup-command.feature b/delivery-process/specs/setup-command.feature index a30a4f16..036db37b 100644 --- a/delivery-process/specs/setup-command.feature +++ b/delivery-process/specs/setup-command.feature @@ -47,7 +47,7 @@ Feature: Interactive Setup Command Rule: Init detects existing project context before making changes **Invariant:** The init command reads the target directory for package.json, - tsconfig.json, delivery-process.config.ts, and monorepo markers before prompting + tsconfig.json, delivery-process.config.ts (or .js), and monorepo markers before prompting or generating any files. Detection results determine which steps are skipped. **Rationale:** Blindly generating files overwrites user configuration and breaks @@ -78,7 +78,9 @@ Feature: Interactive Setup Command **Invariant:** The init command prompts for preset selection from the three available presets (generic, libar-generic, ddd-es-cqrs) with descriptions, and for source glob paths with defaults inferred from project structure. The --yes - flag skips all prompts and uses defaults. + flag skips non-destructive selection prompts and uses defaults. Destructive + overwrites require an explicit --force flag; otherwise init exits without + modifying existing files. **Rationale:** New users do not know which preset to choose or what glob patterns to use. Smart defaults reduce decisions to confirmations. The --yes flag enables @@ -106,11 +108,19 @@ Feature: Interactive Setup Command And preset defaults to "libar-generic" And source globs use sensible defaults + @acceptance-criteria @validation + Scenario: Non-interactive mode refuses to overwrite existing config + Given a directory with an existing delivery-process config file + When running the init command with --yes flag + Then the command prints a message requiring --force to overwrite + And exits with code 1 + Rule: Generated config file uses defineConfig with correct imports - **Invariant:** The generated delivery-process.config.ts imports defineConfig - from the correct path, uses the selected preset, and includes configured source - globs. An existing config file is never overwritten without confirmation. + **Invariant:** The generated delivery-process.config.ts (or .js) imports + defineConfig from the correct path, uses the selected preset, and includes + configured source globs. An existing config file is never overwritten without + confirmation. **Rationale:** The config file is the most important artifact. An incorrect import path or malformed glob causes every subsequent command to fail. The @@ -130,7 +140,7 @@ Feature: Interactive Setup Command @acceptance-criteria @validation Scenario: Existing config file is not overwritten without confirmation - Given a directory with an existing delivery-process.config.ts + Given a directory with an existing delivery-process config file When running the init command Then the command prompts for overwrite confirmation And answering "no" preserves the existing file @@ -139,7 +149,8 @@ Feature: Interactive Setup Command **Invariant:** Injected scripts reference bin names (process-api, generate-docs) resolved via node_modules/.bin, not dist paths. Existing scripts are preserved. - The package.json "type" field is set to "module" if not already present. + The package.json "type" field is preserved. ESM migration is an explicit + opt-in via --esm flag. **Rationale:** The tutorial uses long fragile dist paths. Bin commands are the stable public API. Setting type:module ensures ESM imports work for the config. @@ -153,7 +164,7 @@ Feature: Interactive Setup Command When the init command injects scripts Then package.json contains process:query using "process-api" And contains docs:all using "generate-docs" - And contains "type" set to "module" + And preserves the existing "type" field @acceptance-criteria @validation Scenario: Existing scripts in package.json are preserved diff --git a/src/cli/lint-steps.ts b/src/cli/lint-steps.ts index f19f003e..c5ae509c 100644 --- a/src/cli/lint-steps.ts +++ b/src/cli/lint-steps.ts @@ -132,7 +132,7 @@ Rules: error missing-and-destructuring And steps but no And destructured error missing-rule-wrapper Rule: blocks but no Rule() wrapper warning hash-in-step-text Mid-line # in step text silently truncates - error keyword-in-description Description line starts with Given/When/Then + error keyword-in-description Description line starts with Given/When/Then/And/But warning outline-quoted-values Quoted values in Outline suggest wrong pattern error repeated-step-pattern Same step pattern registered twice in scenario diff --git a/src/cli/process-api.ts b/src/cli/process-api.ts index 4e118d51..3eb312d0 100644 --- a/src/cli/process-api.ts +++ b/src/cli/process-api.ts @@ -53,6 +53,7 @@ import { printVersionAndExit } from './version.js'; import { CLI_SCHEMA } from './cli-schema.js'; import type { CLIOptionGroup } from './cli-schema.js'; import { + VALID_TRANSITIONS, isValidTransition as fsmIsValidTransition, getValidTransitionsFrom as fsmGetValidTransitionsFrom, } from '../validation/fsm/transitions.js'; @@ -516,6 +517,28 @@ const FSM_SHORT_CIRCUIT_METHODS: ReadonlySet = new Set([ 'getProtectionInfo', ]); +/** + * Validate and parse a CLI string as a ProcessStatusValue. + * Rejects unknown status values with a helpful error message. + */ +function parseProcessStatus( + value: string | undefined, + usage: string, + label: string +): ProcessStatusValue { + if (value === undefined) { + throw new QueryApiError('INVALID_ARGUMENT', usage); + } + if (!(value in VALID_TRANSITIONS)) { + const valid = Object.keys(VALID_TRANSITIONS).join(', '); + throw new QueryApiError( + 'INVALID_ARGUMENT', + `Unknown ${label}: "${value}". Expected one of: ${valid}` + ); + } + return value as ProcessStatusValue; +} + /** * Attempt to handle an FSM query without building the pipeline. * @@ -534,15 +557,10 @@ function tryFsmShortCircuit(subcommand: string, subArgs: readonly string[]): unk switch (methodName) { case 'isValidTransition': { - const from = fsmArgs[0]; - const to = fsmArgs[1]; - if (from === undefined || to === undefined) { - throw new QueryApiError( - 'INVALID_ARGUMENT', - 'Usage: process-api query isValidTransition ' - ); - } - return fsmIsValidTransition(from as ProcessStatusValue, to as ProcessStatusValue); + const usage = 'Usage: process-api query isValidTransition '; + const from = parseProcessStatus(fsmArgs[0], usage, 'fromStatus'); + const to = parseProcessStatus(fsmArgs[1], usage, 'toStatus'); + return fsmIsValidTransition(from, to); } case 'checkTransition': { @@ -565,25 +583,21 @@ function tryFsmShortCircuit(subcommand: string, subArgs: readonly string[]): unk } case 'getValidTransitionsFrom': { - const status = fsmArgs[0]; - if (status === undefined) { - throw new QueryApiError( - 'INVALID_ARGUMENT', - 'Usage: process-api query getValidTransitionsFrom ' - ); - } - return fsmGetValidTransitionsFrom(status as ProcessStatusValue); + const status = parseProcessStatus( + fsmArgs[0], + 'Usage: process-api query getValidTransitionsFrom ', + 'status' + ); + return fsmGetValidTransitionsFrom(status); } case 'getProtectionInfo': { - const status = fsmArgs[0]; - if (status === undefined) { - throw new QueryApiError( - 'INVALID_ARGUMENT', - 'Usage: process-api query getProtectionInfo ' - ); - } - const summary = fsmGetProtectionSummary(status as ProcessStatusValue); + const status = parseProcessStatus( + fsmArgs[0], + 'Usage: process-api query getProtectionInfo ', + 'status' + ); + const summary = fsmGetProtectionSummary(status); return { status, level: summary.level, diff --git a/src/lint/process-guard/decider.ts b/src/lint/process-guard/decider.ts index 4ce60d4a..25591b08 100644 --- a/src/lint/process-guard/decider.ts +++ b/src/lint/process-guard/decider.ts @@ -298,8 +298,13 @@ function checkStatusTransitions(state: ProcessState, changes: ChangeDetection): const violations: ProcessViolation[] = []; for (const [file, transition] of changes.statusTransitions) { - // Files with unlock-reason bypass FSM check (supports retroactive completions and file splits) - if (transition.hasUnlockReason === true) { + // Only validated unlock reasons bypass FSM, and only for retroactive completion paths + const fileState = state.files.get(file); + if ( + transition.to === 'completed' && + transition.hasUnlockReason === true && + fileState?.hasUnlockReason === true + ) { continue; }