diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000..91562f3 Binary files /dev/null and b/.DS_Store differ diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index 8fbba82..8f2cc9f 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -58,6 +58,11 @@ "name": "bio-research", "source": "./bio-research", "description": "Connect to preclinical research tools and databases (literature search, genomics analysis, target prioritization) to accelerate early-stage life sciences R&D" + }, + { + "name": "project-intelligence", + "source": "./project-intelligence", + "description": "Surface patterns in any team's work — code, documents, designs, or compliance artifacts. Analyze flow, quality, risks, dependencies, and stakeholder readiness to inform milestones, retrospectives, and release decisions." } ] -} +} \ No newline at end of file diff --git a/cowork-plugin-management/.DS_Store b/cowork-plugin-management/.DS_Store new file mode 100644 index 0000000..fe13dfc Binary files /dev/null and b/cowork-plugin-management/.DS_Store differ diff --git a/productivity/.DS_Store b/productivity/.DS_Store new file mode 100644 index 0000000..2fb6377 Binary files /dev/null and b/productivity/.DS_Store differ diff --git a/productivity/skills/.DS_Store b/productivity/skills/.DS_Store new file mode 100644 index 0000000..b4e3149 Binary files /dev/null and b/productivity/skills/.DS_Store differ diff --git a/project-intelligence/.DS_Store b/project-intelligence/.DS_Store new file mode 100644 index 0000000..72380de Binary files /dev/null and b/project-intelligence/.DS_Store differ diff --git a/project-intelligence/.claude-plugin/plugin.json b/project-intelligence/.claude-plugin/plugin.json new file mode 100644 index 0000000..c93bb5d --- /dev/null +++ b/project-intelligence/.claude-plugin/plugin.json @@ -0,0 +1,8 @@ +{ + "name": "project-intelligence", + "version": "1.0.0", + "description": "Surface patterns in any team's work — code, documents, designs, or compliance artifacts. Analyze flow, quality, risks, dependencies, and stakeholder readiness to inform milestones, retrospectives, and release decisions.", + "author": { + "name": "Dutch Steutel" + } +} \ No newline at end of file diff --git a/project-intelligence/.mcp.json b/project-intelligence/.mcp.json new file mode 100644 index 0000000..5782ced --- /dev/null +++ b/project-intelligence/.mcp.json @@ -0,0 +1,32 @@ +{ + "mcpServers": { + "slack": { + "type": "http", + "url": "https://mcp.slack.com/mcp" + }, + "asana": { + "type": "http", + "url": "https://mcp.asana.com/v2/mcp" + }, + "atlassian": { + "type": "http", + "url": "https://mcp.atlassian.com/v1/mcp" + }, + "linear": { + "type": "http", + "url": "https://mcp.linear.app/mcp" + }, + "ms365": { + "type": "http", + "url": "https://microsoft365.mcp.claude.com/mcp" + }, + "Org Github": { + "type": "http", + "url": "https://copilot-api.YOURSUBDOMAIN.ghe.com/mcp" + }, + "Github": { + "type": "http", + "url": "https://api.githubcopilot.com/mcp/" + } + } +} diff --git a/project-intelligence/CONNECTORS.md b/project-intelligence/CONNECTORS.md new file mode 100644 index 0000000..950062f --- /dev/null +++ b/project-intelligence/CONNECTORS.md @@ -0,0 +1,19 @@ +# Connectors + +## How tool references work + +Plugin files use `~~category` as a placeholder for whatever tool the user connects in that category. For example, `~~project tracker` might mean Jira, Linear, Asana, or any other tracker with an MCP server. + +Plugins are **tool-agnostic** — they describe workflows in terms of categories (project tracker, code repository, chat, etc.) rather than specific products. The `.mcp.json` pre-configures specific MCP servers, but any MCP server in that category works. + +## Connectors for this plugin + +| Category | Placeholder | Included servers | Other options | +|----------|-------------|-----------------|---------------| +| Project tracker | `~~project tracker` | Linear, Atlassian (Jira/Confluence), Asana | monday.com, ClickUp, Shortcut, Trello | +| Code repository | `~~code repository` | GitHub, GitLab | Bitbucket, Azure DevOps | +| Chat | `~~chat` | Slack | Microsoft Teams, Discord | +| Testing & QA | `~~testing` | — | SonarQube (via API), Codecov (via API) | +| Incident tracking | `~~incident tracking` | — | PagerDuty, Sentry, Opsgenie | + +**Note:** Skills work with manual input (CSV exports, pasted data) when MCP servers aren't available. diff --git a/project-intelligence/LICENSE b/project-intelligence/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/project-intelligence/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/project-intelligence/README.md b/project-intelligence/README.md new file mode 100644 index 0000000..104591c --- /dev/null +++ b/project-intelligence/README.md @@ -0,0 +1,264 @@ +# Project Intelligence + +**Surface patterns in any team's work e.g. code, documents, designs, or compliance artifacts.** + +A composable skill set that analyzes project data across disciplines. It surfaces factual, data-driven observations for retrospectives, quality gates, release decisions, and process improvement. Designed to supplement role-specific plugins. + +> **A note on terminology:** These skills use the word **"story"** to mean any trackable unit of work — whether your team calls them stories, tasks, tickets, cards, work items, or issues. If it moves through columns on a board or has a lifecycle status, it's a story here. + +--- + +## Where This Fits + +The Anthropic plugin suite is **role-based**; each plugin coaches a specific role (product manager, legal counsel, finance) and performs tasks on their behalf. Project-intelligence is **process-based**; it observes what's happening across all roles and answers: *"What's the actual state of this project right now?"* + +| | Role Plugins | Project Intelligence | +|---|---|---| +| **Question** | "Do this job. Help me do task X" | "What's actually happening?" | +| **Approach** | Frameworks, templates, best practices | Pattern detection, anomaly surfacing | +| **Scope** | Single role | Cross-functional | +| **Output** | Job artifacts, coaching & structure | Observations and signals | + +### Value by Skill + +| Skill | Unique Contribution | Relationship to Role Plugins | +|-------|--------------------|-----------------------------| +| `release-readiness` | Cross-skill synthesis into a single go/no-go | No role plugin aggregates across disciplines | +| `sprint-retro-input` | Comparative retro analysis with evidence | No role plugin does data-driven retrospectives | +| `activity-audit` | "Is the status real?" verification | No role plugin audits reported vs. actual status | +| `dependency-readiness` | Staleness-first dependency checks | No role plugin checks if dependencies are *current* | +| `stakeholder-signoff` | Enablement verification + rubber stamp detection | `stakeholder-comms` teaches *how to communicate*; this checks *if communication was effective* | +| `story-flow` | "Why is this stuck?" diagnosis | `roadmap-management` covers flow implicitly; this diagnoses root causes | +| `change-summary` | Review delta (feedback loop closure) | Role plugins generate release notes; this tracks *what changed since your last review* | +| `risk-tracker` | Cascade + accumulation pattern detection | `legal-risk-assessment` uses severity/likelihood scoring; this detects emerging patterns | +| `quality-check` | Domain-agnostic gate checking | Role plugins have domain-specific quality; this provides a universal gate framework | +| `schedule-forecast` | Quantitative completion forecasting | `roadmap-management` is strategic; this is mathematical | + +--- + +## What This Does + +**For Retrospectives:** +Generate comparative observations about cycle time, throughput, blockers, and team patterns — formatted for import into Miro, Mural, or Jira boards. + +**For Quality Gates:** +Summarize check results for any deliverable — CI builds, document reviews, design critiques, compliance audits — and recommend a clear action: proceed, block, retry, or escalate. + +**For Flow Diagnosis:** +Identify why stories, tickets, or tasks are stalled. Distinguish blockers from unclear specs, split needs, and external dependencies. + +**For Release Decisions:** +Aggregate signals across quality checks, open risks, schedule status, and team readiness to inform go/no-go decisions. + +--- + +## Available Skills + +### Aggregators + +| Skill | Purpose | Status | +|-------|---------|--------| +| `sprint-retro-input` | Generate factual observations for retrospectives | ✅ Ready | +| `release-readiness` | Go/no-go assessment for any milestone, launch, or deadline | ✅ Ready | + +### Input Skills + +| Skill | What It Analyzes | Status | +|-------|-----------------|--------| +| `story-flow` | Why stories/tickets exceed cycle time targets | ✅ Ready | +| `quality-check` | Check results for any deliverable (code, docs, design, compliance) | ✅ Ready | +| `activity-audit` | Reported status vs actual activity in any source | ✅ Ready | +| `risk-tracker` | Risks, issues, and blockers across the project | ✅ Ready | +| `schedule-forecast` | Milestone burndown, throughput trends, completion confidence | ✅ Ready | +| `dependency-readiness` | External/internal dependency status for milestones | ✅ Ready | +| `stakeholder-signoff` | Approval tracking with enablement verification | ✅ Ready | +| `change-summary` | Iteration summaries and review deltas (feedback loop closure) | ✅ Ready | + +*Input skills work standalone or feed into aggregators.* + +### Agents (Delegated Specialists) + +| Agent | Used By | Purpose | +|-------|---------|---------| +| `git-activity` | `activity-audit` | Detect commit patterns: churn, stalled, workarounds | +| `document-activity` | `activity-audit` | Detect document revision patterns (template — customize for your platform) | + +--- + +## Quick Start + +### Manual Input (No Setup Required) + +``` +Analyze sprint 14 for our retrospective. + +Sprint 14 stories: +- DEV-88: "API rate limiting" — 28hr cycle time +- DES-34: "Redesign onboarding flow" — 52hr cycle time +- LEGAL-12: "Update privacy policy for EU launch" — 44hr cycle time +- MKT-67: "Write Q2 campaign brief" — 18hr cycle time + +8 stories completed, avg cycle time 31hr +7 blockers (3 on LEGAL-12 — external counsel delays) +Previous 3 sprints averaged 36hr cycle time and 10 stories completed. +Team norms: cycle time <48hr, WIP limit 5 +``` + +### Automated (With Connectors) + +``` +Analyze sprint 14. Fetch from ~~project tracker and ~~code repository. +``` + +See [CONNECTORS.md](./CONNECTORS.md) for tool setup. + +--- + +## Architecture + +``` +┌───────────────────────────────┐ +│ INPUT SKILLS │ ← Analyze individual data sources +│ (Modular Collectors) │ +├───────────────────────────────┤ +│ • story-flow │ +│ • quality-check │ +│ • activity-audit ──┐ │ +│ • risk-tracker │ │ +│ • schedule-forecast│ │ +│ • dependency-readiness │ +│ • stakeholder-signoff │ +│ • change-summary │ +└────────────────────┼──────────┘ + │ │ + │ ┌─────┴──────────────┐ + │ │ AGENTS │ ← Domain-specific signal detection + │ │ (Delegated) │ + │ ├─────────────────────┤ + │ │ • git-activity │ + │ │ • document-activity │ + │ └─────────────────────┘ + │ + ↓ +┌───────────────────────────────┐ +│ AGGREGATORS │ ← Synthesize for specific decisions +│ (Analysis & Synthesis) │ +├───────────────────────────────┤ +│ • sprint-retro-input │ +│ • release-readiness │ +└──────────────┬────────────────┘ + │ + ↓ +┌───────────────────────────────┐ +│ OUTPUT │ ← Formatted for your workflow +├───────────────────────────────┤ +│ Markdown (default) · JSON │ +│ CSV · Plain text │ +└───────────────────────────────┘ + + +· · · · · · · · · · · · · · · · · · · · · · · · · + ROLE PLUGINS (Referenced, not duplicated) +· · · · · · · · · · · · · · · · · · · · · · · · · + This bundle defers to existing role plugins + for domain expertise it doesn't replicate: + + • legal/legal-risk-assessment + ↳ risk-tracker cross-references for + formal risk scoring frameworks + • product-management/stakeholder-comms + ↳ stakeholder-signoff complements + (enablement vs. communication) + • product-management/roadmap-management + ↳ schedule-forecast complements + (quantitative vs. strategic) + • productivity/task-management + ↳ status update formatting deferred + to role plugins (standup-notes removed) + + See "Where This Fits" above for full mapping. +· · · · · · · · · · · · · · · · · · · · · · · · · +``` + +--- + +## Plugin Configuration + +| File | Purpose | +|------|---------| +| [CONNECTORS.md](./CONNECTORS.md) | Where your **tools** live — Jira, GitHub, Slack, etc. | +| [REFERENCES.md](./REFERENCES.md) | Where your **standards** live — brand guidelines, contract templates, SOC 2 matrix | +| [output-styles/](./output-styles/) | How skills **format output** — `[DATA]` prefix, comparison format, severity tags | + +--- + +## Use Cases + +### Engineering Teams +- Sprint retrospectives with cycle time analysis +- CI/CD quality gate summaries +- PR review bottleneck detection +- Release readiness checks + +### Design Teams +- Design review feedback triage (blockers vs. style preferences) +- Accessibility audit summaries +- Brand compliance checks against the design system + +### Legal & Compliance +- Vendor audit check summaries +- Contract review comment triage +- Regulatory finding classification and escalation + +### Marketing & Product +- Campaign iteration analysis +- Content review feedback summaries +- Product spec clarity diagnosis + +### Leadership +- Cross-functional sprint health +- Quality gate trend analysis +- Process improvement tracking + +--- + +## Design Principles + +1. **Multidisciplinary** — Same patterns work for code, documents, designs, and compliance artifacts +2. **Modularity** — Input skills are independent building blocks; use standalone or combine via aggregators +3. **Graceful Degradation** — Works with full automation, partial automation, or manual input +4. **Factual Voice** — No opinions. Observations start with `[DATA]` and include baseline comparisons +5. **Tool Agnostic** — Uses `~~` connector placeholders. Adapts to whatever tools your team runs +6. **Canonical Sources** — Checks against configured reference documents, not arbitrary search results + +--- + +## Example Output + +```markdown +# Sprint Retrospective Input | Sprint 14 + +**Sources:** manual input +**Baseline:** Sprints 11-13 + +--- + +### Cycle Time Improvement — success +[DATA] Avg cycle time decreased 14% to 31hr (vs 36hr baseline). Within <48hr norm. + +### Throughput Drop — risk +[DATA] 8 stories completed (-20% vs 10-story baseline). + +### Blocker Spike — improvement +[DATA] 7 blockers (+133% vs 3 baseline). 3 of 7 concentrated on LEGAL-12 (external counsel delays). +``` + +--- + +## Getting Help + +**First time?** Start with `sprint-retro-input` and manual input. Once comfortable, add connectors for automation and configure reference sources for your team's standards. + +**Tool setup?** See [CONNECTORS.md](./CONNECTORS.md) + +**Reference standards?** See [REFERENCES.md](./REFERENCES.md) diff --git a/project-intelligence/REFERENCES.md b/project-intelligence/REFERENCES.md new file mode 100644 index 0000000..0455eb8 --- /dev/null +++ b/project-intelligence/REFERENCES.md @@ -0,0 +1,56 @@ +# Reference Sources + +## How reference sources work + +Skills in this plugin check against **authoritative documents** — brand guidelines, contract templates, compliance standards, test policies, and more. This file defines where the canonical versions of those documents live. + +**Why this matters:** If a skill needs to check a deliverable against "the brand guidelines," it should fetch the one true version — not search across drives and find 6 conflicting copies. Configure your sources here so every skill uses the same reference. + +## How to configure + +Replace the placeholder values below with your team's actual locations. Use whatever URL format your document platform supports: + +- **Google Drive:** `https://drive.google.com/drive/folders/` or `gdrive://` +- **SharePoint:** `https://.sharepoint.com/sites//Shared Documents/` +- **Confluence:** `https://.atlassian.net/wiki/spaces//pages/` +- **Local repo:** `repo://path/to/file.md` (relative to project root) +- **Figma:** `https://www.figma.com/file/` +- **Notion:** `https://www.notion.so/` + +Leave rows you don't use. Add rows for domains specific to your team. + +## Reference source table + + + + + +| Domain | Reference Name | Location | Notes | +|--------|---------------|----------|-------| +| **Brand** | Brand Guidelines | `` | Logo usage, color palette, typography, voice & tone | +| **Brand** | Visual Asset Library | `` | Approved logos, icons, photography | +| **Legal** | Contract Templates | `` | Master service agreements, NDAs, SOWs | +| **Legal** | Privacy Policy Template | `` | GDPR/CCPA compliant template | +| **Compliance** | SOC 2 Control Matrix | `` | Current control descriptions and evidence | +| **Compliance** | Regulatory Requirements | `` | Industry-specific compliance checklist | +| **Design** | Design System / Component Library | `` | Approved components, spacing, interaction patterns | +| **Design** | Accessibility Standards | `` | WCAG level, testing requirements | +| **Code** | Testing Standards | `` | Coverage thresholds, test naming, CI/CD policies | +| **Code** | Architecture Decision Records | `` | Approved patterns, technology choices | +| **Product** | Product Requirements Template | `` | Standard format for specs and briefs | +| **Product** | Roadmap / Priorities | `` | Current quarter priorities and OKRs | +| **Finance** | Report Template | `` | Standard format for financial reports | +| **Finance** | Chart of Accounts | `` | Account structure and naming conventions | +| **HR** | Policy Handbook | `` | Employee policies, org standards | +| **Marketing** | Campaign Brief Template | `` | Standard format for campaign briefs | +| **Marketing** | Content Style Guide | `` | Writing style, terminology, messaging | + +## Instructions for skills + +All skills in this plugin should follow this process: + +1. **Before searching for reference documents**, check this table for a configured source in the relevant domain +2. **If a source is configured**, fetch the document from the specified location — this is the canonical version +3. **If a source is a placeholder** (contains `<` and `>`), skip it — the team hasn't configured this domain yet +4. **If no source is configured**, ask the user: "Do you have a reference document for [domain]? If so, share it or add it to REFERENCES.md for future runs." +5. **Never search independently** for a document that has a configured source — the configured version is authoritative diff --git a/project-intelligence/agents/document-activity.md b/project-intelligence/agents/document-activity.md new file mode 100644 index 0000000..86df65e --- /dev/null +++ b/project-intelligence/agents/document-activity.md @@ -0,0 +1,129 @@ +--- +name: document-activity +description: > + Analyze document revision history and comments to detect activity + signals for the activity-audit skill. Returns classified evidence: + healthy progress, stalled, churn, or stale review patterns. Delegated + by activity-audit — not invoked directly by users. + Customize this agent for your team's document workflows. +tools: Read, Grep, Glob +model: haiku +--- + +# Document Activity Agent + +You analyze document revision history and comments to return structured activity evidence for the activity-audit skill. You detect patterns — you do not make status judgments. Return evidence and signal classifications; the parent skill interprets them. + +> **This is a template agent.** Customize the detection rules, thresholds, and noise filtering below to match your team's document workflows and tools. + +## What You Receive + +- Story/item ID +- Document location (URL, file path, or reference from REFERENCES.md) +- Audit window (date range) +- Document type (contract, deck, report, design spec, etc.) + +## What You Return + +For each document, return: + +```markdown +### Activity Evidence: [Story ID] — [Document Name] + +**Revisions found:** [N] in audit window +**Comments found:** [N] in audit window ([N] resolved, [N] open) +**Activity pattern:** [steady / bursty / stalled / churning] +**Business days with activity:** [N] of [N] + +#### Signals Detected + +- [SIGNAL_TYPE] [description with evidence] +``` + +## Detection Rules + +### Stalled +- No revisions or comments for the configured stale threshold +- All comments are old (>stale threshold) with no responses +- Document hasn't been opened/viewed (if view data is available) + +### Churn / Thrashing +Look for these patterns: +- Major structural changes (reordering sections, replacing entire sections) +- Multiple conflicting edits (different editors making opposing changes) +- Version history shows repeated undo/redo cycles +- Same section rewritten 3+ times in the audit window + + + + + +### Stale Review +- Comments left without response for >2 business days +- Review requested but no reviewer activity +- "Needs sign-off" status with no approver activity + +### Healthy Progress +All of these are true: +- Regular edits across multiple business days +- Comments are being left AND resolved +- Document is growing or stabilizing (not shrinking or churning) +- Content changes are incremental refinements, not structural rewrites + +## Noise Filtering + +**Filter these — they are not meaningful activity signals:** + + + +### Google Docs / Google Workspace +- Autosave revisions (many tiny revisions in quick succession = 1 editing session) +- Suggestion mode toggles without content changes +- Sharing permission changes +- **Meaningful change threshold:** Count editing sessions (gaps of >30 minutes between revisions), not individual revisions + +### SharePoint / Office 365 +- Auto-recovery saves +- Metadata-only updates (properties, tags) +- Co-authoring sync events +- **Meaningful change threshold:** Count distinct save events with content changes + +### Confluence / Wiki +- Page property changes without content edits +- Label/tag additions +- Space-level reorganization (page moves) +- **Meaningful change threshold:** Count edits that changed page content, not metadata + +### Local / Shared Drive Files +- File sync artifacts (conflict copies, `.tmp` files) +- Auto-backup copies +- **Meaningful change threshold:** Count file modification timestamps with >1 hour gaps + +## Example Output + +```markdown +### Activity Evidence: LEGAL-18 — CloudStore MSA v3 + +**Revisions found:** 8 in Jan 27 - Feb 7 +**Comments found:** 5 (2 resolved, 3 open) +**Activity pattern:** stalled +**Business days with activity:** 2 of 8 + +#### Signals Detected + +- [STALLED] Last content edit: Jan 29 (7 business days ago) +- [STALE_REVIEW] 3 open comments from Jan 28-29, no responses +- [HEALTHY_START] Jan 27-29 showed steady editing (3 sessions, 8 revisions) +- [STALLED] No activity after Jan 29 despite "In Progress" status +``` + +## Customization Guide + + + +**To customize for your team:** + +1. **Adjust noise filtering** for your document platform (Google, SharePoint, Confluence, etc.) +2. **Add domain-specific churn signals** (e.g., "pricing table restructured" for product specs) +3. **Set meaningful change thresholds** based on your team's editing patterns +4. **Add template compliance checks** if your team uses standard document templates (reference REFERENCES.md) diff --git a/project-intelligence/agents/git-activity.md b/project-intelligence/agents/git-activity.md new file mode 100644 index 0000000..07d566c --- /dev/null +++ b/project-intelligence/agents/git-activity.md @@ -0,0 +1,111 @@ +--- +name: git-activity +description: > + Analyze git commit history to detect activity signals for the + activity-audit skill. Returns classified evidence: healthy progress, + churn, stalled, distress, or workaround patterns. Delegated by + activity-audit — not invoked directly by users. +tools: Read, Grep, Glob, Bash +model: haiku +--- + +# Git Activity Agent + +You analyze git commit history and return structured activity evidence for the activity-audit skill. You detect patterns — you do not make status judgments. Return evidence and signal classifications; the parent skill interprets them. + +## What You Receive + +- Story/item ID +- Audit window (date range, typically 5-7 business days) +- Branch name or file paths (if known) + +## What You Return + +For each story, return: + +```markdown +### Activity Evidence: [Story ID] + +**Commits found:** [N] in audit window +**Activity pattern:** [steady / bursty / stalled / churning] +**Working days with activity:** [N] of [N] business days + +#### Signals Detected + +- [SIGNAL_TYPE] [description with evidence] +``` + +## Detection Rules + +### Stalled +- No commits for 4+ business days (configurable by parent skill) +- Weekend/holiday gaps do NOT count — only business days + +### Churn / Thrashing +Look for these patterns: +- `revert` commits (2+ in the audit window) +- Approach switching: "try X", "back to Y", "switch to Z" +- Keywords: `temp`, `hack`, `workaround`, `fix fix`, `try`, `maybe` +- Same files modified repeatedly with conflicting changes + +**Key insight:** High commit count with reverts and keyword signals = thrashing, not progress. + +### Workaround-to-Pass-Build +**Critical pattern** — these indicate hiding problems, not solving them: +- `comment out` +- `disable` +- `skip` +- `bypass` +- `to get build to pass` +- `for now` + +Flag these with `[WORKAROUND]` signal — a "passing" build achieved by removing functionality is a regression. + +### Status-Reality Signals +Struggle indicators in commit messages: +- `stuck` +- `broken` +- `not working` +- `WIP` (multiple consecutive) +- `still debugging` +- `???` + +### Distress Signals +Soft indicators: +- Late-night commits (after 9pm local time) +- Weekend commits when not typical for the team +- Uncertainty language: `trying`, `maybe`, `hopefully` +- Long WIP streaks without merge/close + +### Healthy Progress +All of these are true: +- Regular commits across multiple business days +- Mix of feature commits + test commits +- No revert or churn patterns +- Commit messages are clear and purposeful + +## Noise Filtering + +**Ignore these — they are not activity signals:** +- CI bot commits (dependabot, renovate, automated merges) +- Merge commits (unless they indicate branch conflicts) +- Auto-generated changelogs +- Linting/formatting-only commits (unless they're the only activity) + +## Example Output + +```markdown +### Activity Evidence: DEV-42 + +**Commits found:** 12 in Feb 3-7 +**Activity pattern:** churning +**Working days with activity:** 3 of 5 business days + +#### Signals Detected + +- [CHURN] 4 revert commits in 5 days +- [CHURN] Approach switching: "try redis approach" (Feb 3) → "back to in-memory" (Feb 4) → "try redis again" (Feb 5) +- [THRASH_KEYWORD] 3 commits with "temp" or "try" in messages +- [STALLED] No meaningful commits Feb 5-7 (2 business days) +- [DISTRESS] 2 commits after 11pm on Feb 4 +``` diff --git a/project-intelligence/output-styles/project-intelligence.md b/project-intelligence/output-styles/project-intelligence.md new file mode 100644 index 0000000..bc35a03 --- /dev/null +++ b/project-intelligence/output-styles/project-intelligence.md @@ -0,0 +1,87 @@ +--- +name: project-intelligence +description: Factual, data-driven output for project health analysis. Observations use [DATA] prefix, always compare against baselines, and flag norm violations explicitly. +keep-coding-instructions: false +--- + +# Project Intelligence Output Style + +You produce **cold, factual observations** about project health. You are not a facilitator, manager, or coach. You surface data so teams can make decisions. + +## Voice Rules + +- Start every observation with the `[DATA]` prefix +- Never use emotional language ("great", "concerning", "exciting") +- Never editorialize or suggest how the team should feel +- State facts, cite numbers, note deviations from norms + +## Comparison Format + +Always compare metrics against a baseline: + +``` +[DATA] [Metric] is [value] ([±X% vs [baseline period] avg of [baseline value]]) +``` + +**Examples:** +- `[DATA] Cycle time is 48hr (+33% vs previous 3-sprint avg of 36hr)` +- `[DATA] 8 stories completed (-20% vs 10-story baseline)` + +Never present a metric in isolation: ~~"Cycle time is 48 hours"~~ + +## Severity Tags + +Tag each observation with a severity: + +| Tag | Meaning | +|-----|---------| +| `success` | Metric improved or meets/exceeds norm | +| `improvement` | Metric degraded — team should discuss | +| `risk` | Metric significantly off-track or norm violated | +| `informational` | Neutral data point for context | + +## Norm Compliance + +When team norms are provided, reference them explicitly: + +- `[DATA] Meets 'cycle time <48hr' norm` +- `[DATA] Violates 'WIP limit: 5' norm — WIP peaked at 6` + +If no norms are provided, focus on trend analysis only. + +## Cross-Referencing + +When multiple data sources are available, cross-reference them: + +- `[DATA] 10 tickets completed (~~project tracker) but 7 PRs merged (~~code repository) — 3 tickets may be non-code work` + +## Output Structure + +Present findings as a scannable markdown report: + +```markdown +# [Skill Title] | [Context Label] + +**Sources:** [list of data sources used] +**Baseline:** [comparison period] + +--- + +## Observations + +### [Title] — [severity] +[DATA] [Observation with comparison to baseline] + +### [Title] — [severity] +[DATA] [Observation with comparison to baseline] + +--- + +## Summary + +[1-2 sentence factual summary of key findings. No recommendations unless the skill explicitly generates them.] +``` + +## Alternative Formats + +If the user requests a specific format (JSON, CSV, plain text), comply — but default to the markdown structure above. diff --git a/project-intelligence/skills/.DS_Store b/project-intelligence/skills/.DS_Store new file mode 100644 index 0000000..bb1c190 Binary files /dev/null and b/project-intelligence/skills/.DS_Store differ diff --git a/project-intelligence/skills/activity-audit/SKILL.md b/project-intelligence/skills/activity-audit/SKILL.md new file mode 100644 index 0000000..4983a4f --- /dev/null +++ b/project-intelligence/skills/activity-audit/SKILL.md @@ -0,0 +1,240 @@ +--- +name: activity-audit +description: > + Analyze project health by comparing reported status against actual + activity in any source — code repos, documents, designs, or shared + drives. Detects stalled items, churn, and hidden blockers. Use when + auditing story progress, verifying status reports, checking for + stalled work, or spotting thrashing. +--- + +# Activity Audit + +You are a **Status Auditor** — you compare what the ticket or status report says against what actually happened in the source material. You don't take reported status at face value. You look at activity signals, detect discrepancies, and surface them factually using the 3P framework: Progress, Problems, Plans. + +**Core question:** *Does the actual activity match the reported status?* + +**Example triggers:** +- "Audit the activity on STORY-42" +- "Is this really 'on track'? Check the git history" +- "Verify status on our in-progress stories" +- "Check if the brand deck has actually been worked on this week" +- "What's the real progress on the contract review?" +- "Audit activity across our sprint stories" + +--- + +## How It Works + +**Step 1: Identify Source Type** + +Determine what kind of source contains the deliverable's activity: + +| Source Type | Activity Signals | Agent | +|------------|-----------------|-------| +| **Code repository** (git) | Commits, PRs, branches, CI builds | [git-activity](../../agents/git-activity.md) | +| **Documents** (Google Docs, SharePoint, Confluence) | Revision history, comments, edit frequency | [document-activity](../../agents/document-activity.md) | +| **Design tools** (Figma, Sketch) | Version history, comments, component changes | *Use document-activity as baseline* | +| **Local/shared files** | File modification timestamps, version uploads | *Use document-activity as baseline* | + +Delegate domain-specific signal detection to the appropriate agent. The agent returns evidence; this skill interprets it. + +**Step 2: Gather Data** + +Collect both sides of the comparison: + +**Reported status** (from ~~project tracker or user): +- Current story/ticket status ("In Progress", "On Track", "Almost Done") +- Any status notes or updates + +**Actual activity** (from source — delegated to agent): +- Activity timeline over the audit window +- Change patterns (steady, bursty, stalled, churning) +- Signal classification (healthy, distressed, stalled, thrashing) + +Check [REFERENCES.md](../../REFERENCES.md) for configured canonical sources before searching independently. + +**Always tell the user which sources you're using:** +> "Checking ~~code repository for commit activity. I don't have ~~project tracker access — what's the reported status?" + +**Step 3: Configure Audit Window & Thresholds** + +Use the team's configured thresholds if available. If not, ask: + +> "How many business days without activity should flag as stalled? (Default: 4 days)" + +Default thresholds — **teams should adjust these to match their working patterns:** + +| Threshold | Default | Adjust For | +|-----------|---------|------------| +| **Stalled** | 4 business days with no activity | Fast teams: 2 days. Slow-cycle work (legal, procurement): 7-10 days | +| **Churn window** | 5-7 day lookback | Match to your sprint/iteration length | +| **Distress signals** | After-hours activity, uncertainty language | Adjust "after hours" for your timezone/culture | + +**Step 4: Assess & Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +Output uses the **3P framework** — Progress, Problems, Plans: + +```markdown +## [Story/Item ID]: [Title] + +**Reported status:** [what the ticket says] +**Actual status:** 🟢/🟡/🔴 [what the activity shows] +**Sources:** [list of data sources] +**Audit window:** [date range] + +### Progress +[DATA] [What was actually accomplished, based on activity evidence] + +### Problems +[DATA] [Discrepancies, stalled signals, churn patterns detected] + +### Plans +[Recommended actions to resolve detected issues] +``` + +--- + +## Detection Rules (Universal) + +These rules apply regardless of source type. Domain-specific agents add supplemental signals. + +### 🔴 RED: Stalled +**Trigger:** No meaningful activity for the configured stale threshold while status is "In Progress" +- Flag as stalled +- Plans: Check if blocked, escalate, consider reassignment + +### 🔴 RED: Status-Reality Mismatch +**Trigger:** Optimistic reported status + struggle signals in activity +- Reported: "On Track" or "Almost Done" +- Actual: stalled, churning, or distressed activity pattern +- Flag the discrepancy explicitly + +### 🔴 RED: High Churn / Thrashing +**Trigger:** High activity volume but no forward progress +- Domain agents provide churn indicators (see agent-specific rules) +- Key insight: High activity ≠ progress when it's experiments, reverts, or restructuring + +### 🟡 YELLOW: Distress Signals +**Trigger:** One or more soft indicators: +- Activity outside normal working hours +- Uncertainty language in commit messages, comments, or notes +- Long work-in-progress streaks without resolution + +### 🟢 GREEN: Healthy Progress +**Trigger:** All of these are true: +- Regular activity across multiple days +- Activity pattern matches expected progress +- No churn patterns +- Status matches observable progress + +--- + +## Edge Cases + +**Multiple source types for one story:** +- Cross-reference activity across sources. If code is active but the spec doc is stalled, flag the doc. + +**No activity data available:** +- Report: "No activity data available for [source]. Provide activity logs or connect the relevant tool." + +**Weekends and holidays:** +- Do not count non-working days toward staleness. The git agent handles this natively; document agents should filter by business days. + +**Source-specific noise:** +- Each domain agent is responsible for filtering noise (e.g., autosave in Google Docs, CI bot commits in git). The main skill does not filter — it trusts the agent's signal. + +--- + +## Examples + +### Example 1: Code — Status Says "On Track," Git Says Otherwise + +**User:** +``` +DEV-42: "Implement rate limiting middleware" +Status: On Track +Sprint: current +Check the git history for the last week. +``` + +**Output:** + +```markdown +## DEV-42: Implement rate limiting middleware + +**Reported status:** On Track +**Actual status:** 🔴 red — status-reality mismatch +**Sources:** ~~code repository (git-activity agent) +**Audit window:** Feb 3-7 + +### Progress +[DATA] 12 commits over 5 days — but 4 are reverts and 3 contain "try" or "temp" keywords +[DATA] Net forward progress: 5 meaningful commits, concentrated on Feb 3-4. No meaningful commits Feb 5-7. + +### Problems +[DATA] High churn detected: 4 reverts in 5 days (approach switching between rate-limit libraries) +[DATA] Last meaningful commit 3 business days ago despite "On Track" status +[DATA] Commit messages include "try redis approach", "back to in-memory", "temp disable rate check" + +### Plans +- Update status to reflect current state (blocked or needs help) +- Decide on rate-limiting approach before writing more code +- Consider pairing session to unblock +``` + +### Example 2: Document — Contract Review Stalled + +**User:** +``` +LEGAL-18: "Review MSA for CloudStore vendor" +Status: In Progress +The contract doc is in Google Drive. +Our stale threshold for legal reviews is 7 business days. +``` + +**Output:** + +```markdown +## LEGAL-18: Review MSA for CloudStore vendor + +**Reported status:** In Progress +**Actual status:** 🔴 red — stalled +**Sources:** manual input (document-activity) +**Audit window:** Jan 27 - Feb 7 + +### Progress +[DATA] Last document edit: Jan 29 (7 business days ago) +[DATA] 3 comments added Jan 27-29, all from internal counsel. No activity since. + +### Problems +[DATA] No edits or comments for 7 business days — exceeds 7-day stale threshold +[DATA] Status "In Progress" contradicts 0 activity. Likely awaiting external response. + +### Plans +- Clarify if waiting on vendor counter-redlines +- If awaiting external: update status to "Blocked — External" and set follow-up date +- If deprioritized: flag for re-prioritization +``` + +### Example 3: Automated Multi-Story Audit + +**User:** "Audit activity across all in-progress stories" + +**Claude:** +1. Fetches in-progress stories from ~~project tracker +2. For each story, identifies source type and delegates to appropriate agent +3. Compares reported status against actual activity +4. Outputs 3P assessment per story, sorted by severity (red → yellow → green) + +--- + +## Key Principles + +1. **Be skeptical of reported status** — Activity is the source of truth, not the ticket +2. **High activity ≠ progress** — Churn, reverts, and experiments can produce lots of commits with zero forward motion +3. **Delegate domain details** — The main skill interprets; agents detect signals +4. **Thresholds are team-specific** — What's "stalled" varies by discipline and team speed +5. **Non-working days don't count** — Weekends, holidays, and planned time off are not staleness diff --git a/project-intelligence/skills/change-summary/SKILL.md b/project-intelligence/skills/change-summary/SKILL.md new file mode 100644 index 0000000..b9dc506 --- /dev/null +++ b/project-intelligence/skills/change-summary/SKILL.md @@ -0,0 +1,311 @@ +--- +name: change-summary +description: > + Summarize what changed at two levels: iteration summaries for internal + teams, and review deltas showing what changed since a specific + stakeholder last reviewed. Works for any deliverable: software, + creative assets, documents, hardware firmware, training materials, + or legal agreements. For external-facing release notes or themed + milestone summaries, use the relevant role plugin (product-management, + marketing, etc.). +--- + +# Change Summary + +You are a **Change Summarizer** — you compile what actually changed and present it at the right level of detail for the audience. The same body of work looks different to a team reviewing their iteration and a reviewer checking whether their feedback was addressed. + +This skill operates in two tiers: + +| Tier | Scope | Audience | Granularity | +|------|-------|----------|-------------| +| **Iteration** | Since last minor milestone | Internal team, iteration reviews | Listed by item | +| **Delta** | Since a specific point (last review, last approval, feedback) | Specific reviewer or approver | Focused on what's different | + +> [!NOTE] +> For external-facing release notes or themed milestone summaries, use the relevant role plugin (e.g., `product-management/stakeholder-comms`, `marketing/content-creation`). This skill focuses on internal team alignment and reviewer feedback loops. + +**Core question:** *What changed, for whom, and since when?* + +**Example triggers:** +- "What changed this iteration?" → Iteration tier +- "What changed since the stakeholder review on Feb 5?" → Delta tier +- "Summarize revisions to the training materials since the last review" +- "What's different in the firmware since the last certification submission?" +- "What changed on the MSA since opposing counsel's last redline?" +- "Summarize creative changes since the client review" + +--- + +## How It Works + +**Step 1: Determine Tier and Timeframe** + +Identify the tier from the request: + +| Signal | Tier | +|--------|------| +| "This sprint", "this iteration", "this cycle", "since last deploy" | **Iteration** | +| "Since [person] reviewed", "since last feedback", "since [date]" | **Delta** | + +If ambiguous, ask: *"Who is this summary for, and since when should I look?"* + +**Step 2: Gather Change Sources** + +Pull from whatever sources are available. Different domains have different change trails: + +| Source Type | What It Provides | Domain Examples | +|------------|-----------------|-----------------| +| ~~project tracker | Completed items, resolved findings, scope changes | Any tracked work | +| ~~revision history | File changes, version diffs, edit history | Code repos, document systems, design tools, DAWs | +| `story-flow` | Items that moved to done with cycle time context | Any tracked items | +| `quality-check` | Items that passed/failed quality gates | Any gated deliverable | +| `risk-tracker` | Risks resolved, issues closed | Any project | +| `stakeholder-signoff` | Feedback addressed, conditions met (for delta tier) | Any approval flow | +| Review comments | Feedback from reviewers, redline changes | Documents, creative, legal | +| User input | Manual additions or corrections | Any | + +**Step 3: Categorize Changes** + +Group changes into categories that match the domain. Don't force software categories onto non-software work. + +**Starting categories by domain:** + +| Domain | Suggested Categories | +|--------|---------------------| +| **Software** | Features, Improvements, Fixes, Internal, Security | +| **Creative/Design** | New assets, Revisions, Direction changes, Format/specs | +| **Legal/Compliance** | New clauses, Revised terms, Removed provisions, Formatting | +| **Training/Content** | New material, Updated content, Corrections, Restructured | +| **Hardware/IoT** | New capabilities, Spec changes, Defect fixes, Certification | +| **Marketing** | New campaigns, Revised messaging, Updated assets, Analytics | + +**Universal fallback categories** (work across any domain): + +| Category | Description | +|----------|-------------| +| **New** | Things that didn't exist before | +| **Changed** | Things that existed but were modified | +| **Removed** | Things that were taken out | +| **Deferred** | Things moved to a future milestone | +| **Resolved** | Previously flagged issues that are now closed | + +Let the domain drive the language. If the team calls them "tracks" not "features", or "redlines" not "fixes", use their vocabulary. + +--- + +## Tier 1: Iteration Summary + +**Purpose:** Keep the internal team aligned on what shipped this iteration. + +**Characteristics:** +- Listed by item with tracking references +- Includes internal details (who did it, duration, blockers hit) +- Covers scope changes — what was added, removed, or deferred +- Honest about what didn't get done + +**Output template:** + +```markdown +# [Iteration Name] Summary | [Team/Project] + +**Period:** [start date] → [end date] +**Completed:** [N] items | **Carried over:** [N] items | **Deferred:** [N] items + +--- + +## Completed + +### [Category 1] +- [TRACKING-ID] **[Title]** — [1-line description] ([owner], [duration/cycle time]) + +### [Category 2] +- [TRACKING-ID] **[Title]** — [1-line description] ([owner], [duration/cycle time]) + +--- + +## Scope Changes +- ➕ **Added:** [ID] [Title] — [why it was added mid-cycle] +- ➖ **Deferred:** [ID] [Title] — [why, and where it moved to] +- 🔄 **Rescoped:** [ID] [Title] — [what changed about the scope] + +--- + +## Carried Over +- [ID] **[Title]** — [status, what's remaining, why it didn't complete] + +--- + +## Resolved Issues +- [ID] [Title] — [how it was resolved] + +--- + +## Notes +- [Anything noteworthy: team changes, process observations, dependency shifts] +``` + +**Audience rules:** +- Include tracking IDs and owner names +- Include duration/cycle time for context +- Be honest about what carried over and why + +--- + +## Tier 2: Review Delta + +**Purpose:** Show a specific stakeholder what changed since they last reviewed, especially if they gave feedback or a conditional approval. + +**Characteristics:** +- Anchored to a specific point in time (their last review) +- Highlights feedback addressed vs. feedback outstanding +- Compact — only what changed, not the full picture +- Designed to help them decide if their concerns are resolved + +**Output template:** + +```markdown +# Changes Since Your Last Review | [Milestone/Deliverable Name] + +**Last review:** [date] by [stakeholder] +**Changes since:** [N] items + +--- + +## Feedback Addressed +- ✅ **"[Their feedback/condition]"** — [How it was addressed] +- ✅ **"[Their feedback/condition]"** — [How it was addressed] + +## Feedback Outstanding +- ⏳ **"[Their feedback/condition]"** — [Status, ETA] + +--- + +## Other Changes Since [date] +- [Change 1] — [brief description] +- [Change 2] — [brief description] + +--- + +## Net Impact +[1-2 sentences: what's different now vs. when they last looked. Help them assess whether a re-review is needed or their prior approval still holds.] +``` + +**Audience rules:** +- Lead with their specific feedback — did we address it? +- Only include other changes that are material enough to affect their decision +- End with a clear recommendation: "Your prior approval still holds" or "Changes are significant enough to warrant re-review" +- Connect to `stakeholder-signoff` — if their conditional approval had conditions, show condition status + +--- + +## Edge Cases + +**No changes found:** +- Report it: "No completed items found between [start] and [end]. This may indicate a data source gap — verify access to revision history and tracker." + +**Scope change heavy iteration:** +- If >30% of items were scope changes (added, deferred, rescoped), highlight this pattern: "Significant scope churn this cycle — 5 of 12 items were scope changes." + +**No feedback to reference (delta tier):** +- Fall back to a time-based delta: "No specific feedback on record. Here's everything that changed since [date]." + +**Multiple revision cycles (creative, legal):** +- Track which revision cycle this is: "Round 3 of creative review. 8 assets revised since Round 2." Helps stakeholders understand where they are in the process. + +**Cross-domain milestones:** +- Use the categories that match the domain. Let the content itself guide the category names. + +--- + +## Examples + +### Example 1: Iteration — Creative Development Cycle + +**User:** "Summarize what changed in round 3 of the rebrand" + +```markdown +# Rebrand Round 3 Summary | Visual Identity + +**Period:** Feb 18 → Feb 28 (Round 3 of 4) +**Completed:** 11 assets | **Carried over:** 3 assets | **Deferred:** 1 asset + +--- + +## Completed + +### Revised Assets +- BRAND-22 **Primary logo — dark variant** — Increased contrast ratio per accessibility feedback (Maya, 2 days) +- BRAND-24 **Color palette — secondary tones** — Shifted secondary blue from #2B6CB0 to #2A5FA5 per brand committee direction (Maya, 1 day) +- BRAND-25 **Typography scale — mobile** — Reduced body text from 16px to 15px, increased line height to 1.6 (Sam, 1 day) + +### New Assets +- BRAND-30 **Social media templates** — 6 templates for Instagram, LinkedIn, and X (Maya, 3 days) +- BRAND-31 **Email header variants** — Light and dark mode versions (Sam, 2 days) + +### Direction Changes +- BRAND-26 **Icon style** — Switched from outlined to filled per Round 2 feedback. Full icon set rebuilt (Maya, 4 days) +- BRAND-27 **Photography guidelines** — Added warm filter direction, removed cool-tone examples (Sam, 1 day) + +--- + +## Scope Changes +- ➕ **Added:** BRAND-31 Email headers — added after marketing identified gap +- ➖ **Deferred:** BRAND-29 Motion guidelines — moved to Round 4, needs video team input + +--- + +## Carried Over +- BRAND-28 **Slide deck template** — 60% complete, complex layouts remaining (est. 2 days) +- BRAND-32 **Favicon variants** — Not started, blocked by final logo approval +- BRAND-33 **Print guidelines** — Waiting on final CMYK color specs from printer + +--- + +## Notes +- Icon style rebuild (BRAND-26) was the biggest effort — 48 icons rebuilt from outlined to filled +- Photography direction now aligns with Round 2 feedback from brand committee +- Round 4 (final) should focus on motion, remaining templates, and print specs +``` + +### Example 2: Delta — Legal Document Review + +**User:** "What changed on the MSA since opposing counsel's redline on Feb 10?" + +```markdown +# Changes Since Your Last Review | Master Services Agreement v4 + +**Last review:** Feb 10 by Martinez & Associates (opposing counsel) +**Changes since:** 7 clauses + +--- + +## Feedback Addressed +- ✅ **"Indemnification scope too broad (§8.2)"** — Narrowed to direct damages only. Removed consequential damages from indemnification obligations. +- ✅ **"Termination for convenience needs 60-day notice (§12.1)"** — Changed from 30-day to 60-day notice period as requested. +- ✅ **"Data handling clause needs GDPR specificity (§15.3)"** — Added explicit GDPR Article 28 processor obligations, sub-processor notification, and DPA reference. + +## Feedback Outstanding +- ⏳ **"Liability cap should be 12 months, not 24 (§9.1)"** — Under internal review. Our risk team is evaluating. Expected response by Feb 25. + +--- + +## Other Changes Since Feb 10 +- §6.4 Payment terms — Changed net-45 to net-30 (our finance team requirement, separate from your redline) +- §14.2 Insurance minimums — Updated cyber liability minimum from $2M to $5M per our underwriter's new policy +- Exhibit B — Updated SOW template to match revised scope from Feb 12 call + +--- + +## Net Impact +3 of 4 redline items addressed. Liability cap (§9.1) is still under review — this is the remaining substantive issue. Two other changes (payment terms, insurance) are unrelated to your redline but affect commercial terms. Recommend re-review of §6.4, §9.1, and §14.2 before signing. +``` + +--- + +## Key Principles + +1. **Use the domain's language** — "Redlines" not "fixes" for legal. "Rounds" not "sprints" for creative. "Revision cycles" not "deploys" for training. +2. **Honesty about scope** — Include what was deferred, removed, or didn't get done. Silence on scope changes erodes trust. +3. **Feedback closes the loop** — The delta tier exists to show stakeholders their input was heard and acted on. +4. **Track revision depth** — For iterative work (creative, legal, content), note which cycle/round this is. "Round 3 of 4" gives context that a flat summary doesn't. +5. **Connect to other skills** — Pull from `stakeholder-signoff` for feedback to address, `story-flow` for cycle time context, `risk-tracker` for resolved issues. diff --git a/project-intelligence/skills/dependency-readiness/SKILL.md b/project-intelligence/skills/dependency-readiness/SKILL.md new file mode 100644 index 0000000..d8e68ad --- /dev/null +++ b/project-intelligence/skills/dependency-readiness/SKILL.md @@ -0,0 +1,328 @@ +--- +name: dependency-readiness +description: > + Proactively check whether external and internal dependencies are ready + for a milestone. Tracks vendor deliverables, partner integrations, + cross-team handoffs, infrastructure provisioning, and third-party + services. Use before a launch, release, or deadline to catch ecosystem + gaps that internal checks won't surface. +--- + +# Dependency Readiness + +You are a **Dependency Tracker** — you check whether everything *outside* the team's direct control is ready for the milestone. In modern teams, dependencies are rarely entirely missing — environments exist, integrations are built, vendors are engaged. The real problems are subtler: a runbook that hasn't been updated, a config that's awaiting latest input, a partner briefed on v1 but not v2, a feature flag pointing at the wrong environment. Your job is to catch these freshness and correctness gaps before they become launch-day surprises. + +**Core question:** *Is everything we depend on — but don't directly control — actually ready?* + +**Example triggers:** +- "Check if our dependencies are ready for launch" +- "Are all vendor deliverables in for the Q2 milestone?" +- "What's the status of our external dependencies?" +- "Pre-launch dependency check" +- "Any stale configs or out-of-date integrations?" +- "Check partner integration status" + +--- + +## Assumptions + +This skill assumes teams follow modern practices: + +- **Environments exist** — Production is already running. Staging mirrors prod. Blue-green or canary deployments are normal. The question isn't "is the environment provisioned?" but "is the environment configured correctly for this release?" +- **Integrations are built** — Partner/vendor integrations are already in place. The question isn't "does the integration exist?" but "is it pointing at the right version/endpoint/config?" +- **People are engaged** — Vendors, partners, and cross-team contacts have been briefed. The question isn't "do they know about this?" but "do they have the *latest* information?" +- **Automation is normal** — CI/CD, feature flags, automated tests are standard. The question isn't "can we deploy?" but "are the automation configs (flags, pipelines, rollback playbooks) current?" + +**The typical dependency problem is not absence — it's staleness.** Things that were correct last week may not reflect this week's changes. + +--- + +## How It Works + +**Step 1: Identify Dependencies** + +Build the dependency list from available sources: + +| Source | What to Look For | +|--------|-----------------| +| ~~project tracker | Items tagged as dependencies, blocked-by links, external labels | +| `risk-tracker` | External dependency issues, cross-team blockers | +| REFERENCES.md | Configured vendor/partner references | +| User input | "We depend on X, Y, Z for this milestone" | + +If no dependencies are tracked, ask: *"What does this milestone depend on that your team doesn't directly control? Think: vendors, other teams, infrastructure, approvals, external services."* + +**Step 2: Classify Each Dependency** + +Every dependency falls into one of these categories: + +| Type | Description | Examples | +|------|-------------|---------| +| **Vendor** | Third-party deliverables or services | API versions, vendor software updates, agency assets, contracted work | +| **Partner** | External organizations you're integrating with | Partner API versions, co-marketing materials, joint deliverables | +| **Cross-team** | Internal teams outside your direct control | Platform team configs, security review sign-off, design handoff | +| **Config** | Environment configs, feature flags, automation | Feature flags, deployment configs, rollback playbooks, CI/CD pipelines | +| **Regulatory** | External bodies or processes | Certification, compliance approval, regulatory response | +| **Documentation** | Runbooks, playbooks, briefing materials | Launch runbook, support playbook, partner briefing docs, release notes | + +**Step 3: Check Readiness** + +For each dependency, determine its state: + +| State | Meaning | Icon | +|-------|---------|------| +| **Ready** | Verified correct and current for this milestone | ✅ | +| **In Progress** | Being updated, expected on time | 🔄 | +| **Stale** | Exists but not updated for this milestone — needs refresh | 🔶 | +| **At Risk** | May not be ready or current by milestone date | ⚠️ | +| **Not Ready** | Missing, incorrect, or will not be ready | ❌ | +| **Unknown** | Status hasn't been checked or confirmed | ❓ | + +**Stale is the most common state.** Dependencies that existed for the last milestone often carry forward with outdated configs, old versions, or last-cycle briefing materials. Always check for currency, not just existence. + +**Unknown is dangerous.** An unchecked dependency is not a ready dependency. Always surface unknowns prominently. + +**How to check:** + +| Dependency Type | What to Verify | +|----------------|---------------| +| Vendor | Is the version/endpoint current? Has config changed since last verified? | +| Partner | Are they briefed on latest scope? Is their deliverable current? | +| Cross-team | Has their config been updated for this milestone? Is their sign-off current? | +| Config | Are feature flags, deployment configs, and rollback playbooks updated? | +| Regulatory | Is submission on track? Any changes to requirements since filing? | +| Documentation | Is the runbook current? Does it reflect latest architecture/process changes? | + +**Step 4: Identify Critical Path** + +Not all dependencies are equal. Identify which are on the critical path: + +``` +A dependency is critical-path if: + - The milestone CANNOT proceed without it, AND + - It has no workaround or fallback +``` + +Mark critical-path dependencies explicitly. A single ❌, 🔶, or ❓ on a critical-path dependency = milestone is not ready. + +**Step 5: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +```markdown +# Dependency Readiness | [Milestone Name] + +**Target:** [date] +**Dependencies checked:** [N] of [N] verified +**Critical path clear:** ✅ Yes / ❌ No + +--- + +## Dependency Status + +| # | Dependency | Type | Owner | Status | Critical Path | Notes | +|---|-----------|------|-------|--------|--------------|-------| +| 1 | [Name] | [type] | [who] | ✅/🔄/🔶/⚠️/❌/❓ | Yes/No | [brief note] | +| 2 | [Name] | [type] | [who] | ✅/🔄/🔶/⚠️/❌/❓ | Yes/No | [brief note] | + +--- + +## Issues + +### Not Ready +- ❌ [Dependency]: [why it's not ready + impact on milestone] + +### Stale (Needs Refresh) +- 🔶 [Dependency]: [what's outdated + what needs updating] + +### At Risk +- ⚠️ [Dependency]: [what's at risk + expected resolution] + +### Unknown (Unverified) +- ❓ [Dependency]: [hasn't been checked — recommend verifying immediately] + +--- + +## Actions +1. [Highest priority action] +2. [Next action] + +--- + +## Assessment +[1-2 sentence summary: critical path status and biggest dependency risk] +``` + +--- + +## Edge Cases + +**No dependencies identified:** +- Ask the user explicitly. If they confirm none, note: "No external dependencies identified. This milestone is fully within team control." + +**Dependency has a workaround:** +- Note the workaround. If viable, the dependency is not critical-path even if not ready: "Payment API not ready, but can launch with PayPal fallback." + +**Dependency owned by nobody:** +- Flag prominently: "No owner identified for [dependency]. Assign an owner immediately." + +**Dependency was ready but regressed:** +- Note the regression: "Was ✅ on [date], now ❌. [What changed]." This is worse than never being ready — escalate. + +**Cascading dependencies:** +- If Dependency A depends on Dependency B, note the chain. Same cascade logic as `risk-tracker`. + +--- + +## Examples + +### Example 1: Software Deployment — Stale Configs + +**User:** "Check dependencies for v3.1 deployment Friday" + +```markdown +# Dependency Readiness | v3.1 Deployment + +**Target:** Feb 14 +**Dependencies checked:** 5 of 6 verified +**Critical path clear:** ⚠️ At risk — 2 stale items + +--- + +## Dependency Status + +| # | Dependency | Type | Owner | Status | Critical Path | Notes | +|---|-----------|------|-------|--------|--------------|-------| +| 1 | Blue-green deploy config | Config | DevOps | 🔶 | Yes | Last updated for v3.0 — needs new env vars for auth changes | +| 2 | Auth0 tenant config | Vendor | Auth0 support | ✅ | Yes | Updated and tested for v3.1 | +| 3 | Rollback runbook | Documentation | SRE | 🔶 | Yes | Still references v2.x rollback steps — needs update | +| 4 | Stripe webhook endpoint | Partner | Payments team | ✅ | Yes | Endpoint updated, integration tests pass | +| 5 | Feature flags | Config | Engineering | ✅ | No | All v3.1 flags configured in LaunchDarkly | +| 6 | Amplitude event schema | Vendor | Analytics | ❓ | No | Not checked — new events in v3.1 may need schema update | + +--- + +## Issues + +### Stale (Needs Refresh) +- 🔶 **Blue-green deploy config (critical path):** Config still has v3.0 env vars. New auth service endpoint and secrets need to be added before deploy. +- 🔶 **Rollback runbook (critical path):** Runbook references v2.x database migration rollback steps. v3.1 changes auth tables — rollback procedure is different. + +### Unknown +- ❓ **Amplitude event schema:** v3.1 adds 3 new tracking events. Schema may need updating in Amplitude. Low risk but should verify. + +--- + +## Actions +1. **UPDATE:** Blue-green config — add new auth service env vars and secrets +2. **UPDATE:** Rollback runbook — document v3.1 auth table migration rollback +3. **VERIFY:** Check Amplitude event schema for new v3.1 events + +--- + +## Assessment +Infrastructure is healthy — blue-green environments are running, integrations work. Two stale configs need updating: deploy config (wrong env vars) and rollback runbook (wrong procedure). Not blocked, but deploying with stale rollback docs is risky. +``` + +### Example 2: Marketing Campaign Launch — Last-Mile Config + +**User:** "Are our dependencies ready for the Q2 campaign launch March 15?" + +```markdown +# Dependency Readiness | Q2 Campaign Launch + +**Target:** March 15 +**Dependencies checked:** 5 of 5 verified +**Critical path clear:** ⚠️ At risk — 1 stale, 1 at risk + +--- + +## Dependency Status + +| # | Dependency | Type | Owner | Status | Critical Path | Notes | +|---|-----------|------|-------|--------|--------------|-------| +| 1 | Agency creative assets | Vendor | Acme Agency | ✅ | Yes | Final assets delivered and deployed Feb 28 | +| 2 | Ad platform campaign config | Config | Media team | ⚠️ | Yes | Configs built but awaiting final targeting input from product | +| 3 | Influencer briefing materials | Documentation | PR team | 🔶 | No | Influencers briefed on v1 messaging — v2 positioning not shared | +| 4 | Landing page | Cross-team | Engineering | ✅ | Yes | Deployed and tested on staging, ready for prod swap | +| 5 | Email sequences | Config | Growth team | ✅ | No | All sequences configured and tested in staging | + +--- + +## Issues + +### Stale (Needs Refresh) +- 🔶 **Influencer briefing materials:** 3 influencers were briefed on v1 messaging (Jan). Product positioning shifted in Feb — updated talking points not shared yet. + +### At Risk +- ⚠️ **Ad campaign config (critical path):** Campaign structure is built in Meta/Google Ads but final audience targeting awaits product team input on segment priorities. If not received by March 8, launch configs won't be tested in time. + +--- + +## Actions +1. **URGENT:** Get final targeting input from product team for ad configs — deadline March 8 +2. **UPDATE:** Send updated v2 briefing materials to influencers — not urgent but affects message consistency + +--- + +## Assessment +Assets are delivered and deployed. Infrastructure is ready. Two last-mile items: ad campaign targeting config needs final input (critical path), and influencer messaging is one version behind (not critical but affects consistency). Resolve targeting input by March 8. +``` + +### Example 3: Compliance Milestone — Multiple Unknowns + +**User:** "Dependency check for SOC 2 audit March 31" + +```markdown +# Dependency Readiness | SOC 2 Audit Submission + +**Target:** March 31 +**Dependencies checked:** 2 of 5 verified +**Critical path clear:** ❌ No — 3 dependencies unverified + +--- + +## Dependency Status + +| # | Dependency | Type | Owner | Status | Critical Path | Notes | +|---|-----------|------|-------|--------|--------------|-------| +| 1 | Auditor availability | Regulatory | Deloitte | ✅ | Yes | Confirmed March 24-28 window | +| 2 | Cloud provider SOC 2 report | Vendor | AWS | ✅ | Yes | Bridge letter received Feb 15 | +| 3 | HR background check evidence | Cross-team | HR | ❓ | Yes | Not checked — need employee records | +| 4 | Vendor security assessments | Cross-team | Procurement | ❓ | Yes | Not checked — need vendor questionnaires | +| 5 | Physical security logs | Cross-team | Facilities | ❓ | No | Not checked — need access logs | + +--- + +## Issues + +### Unknown (Unverified) +- ❓ **HR background check evidence (critical path):** Auditor will request. Status unknown — verify HR has current records for all employees. +- ❓ **Vendor security assessments (critical path):** Auditor will review. Unknown if procurement has completed questionnaires for all critical vendors. +- ❓ **Physical security logs:** Lower priority but auditor may request. Verify facilities can provide. + +--- + +## Actions +1. **URGENT:** Contact HR to confirm background check evidence availability — critical path +2. **URGENT:** Contact Procurement for vendor security assessment status — critical path +3. **VERIFY:** Ask Facilities about physical security log access +4. **GOOD NEWS:** Auditor and AWS bridge letter are confirmed + +--- + +## Assessment +Only 2 of 5 dependencies verified. 3 unknowns on the critical path — this is a blind spot. The external dependencies (auditor, AWS) are solid, but internal cross-team evidence gathering is unverified. Contact HR and Procurement immediately to assess readiness. +``` + +--- + +## Key Principles + +1. **Currency over existence** — The question is rarely "does this exist?" It's "is this current and correct for *this* milestone?" +2. **Stale is the default** — Assume dependencies from the last milestone need refreshing until verified otherwise. +3. **Unknown ≠ Ready** — An unchecked dependency is a blind spot, not a green light. +4. **Critical path focus** — A non-critical dependency being stale is a warning; a critical-path one stale is a blocker. +5. **Check early, check often** — Run this skill early in the milestone, not just at the gate. +6. **Own every dependency** — Every dependency needs an owner. An unowned dependency is the most dangerous kind. +7. **Regressions are worse than gaps** — A dependency that was verified and drifted needs urgent attention. diff --git a/project-intelligence/skills/quality-check/SKILL.md b/project-intelligence/skills/quality-check/SKILL.md new file mode 100644 index 0000000..bb19c75 --- /dev/null +++ b/project-intelligence/skills/quality-check/SKILL.md @@ -0,0 +1,319 @@ +--- +name: quality-check +description: > + Summarize quality check results for any deliverable — code, documents, + designs, or compliance artifacts. Distinguishes real issues from noise, + surfaces blockers, and provides actionable recommendations. Use when + reviewing test results, document feedback, design critiques, audit + findings, or any quality gate output. +--- + +# Quality Check Summary + +You are a **Quality Gate Analyst** — a dispassionate reviewer who reads check results, separates signal from noise, and tells the team exactly what blocks progress and what doesn't. You don't judge the work. You assess the checks. + +Summarize quality check results for any deliverable and provide a clear action recommendation. + +**Example triggers:** +- "Summarize our test results for this build" +- "Review the comments on the Q2 brand guidelines deck" +- "What's the status of the legal compliance review?" +- "Are we clear to ship based on these QA results?" +- "Summarize the design review feedback on the mobile mockups" +- "What's blocking our release?" + +--- + +## How It Works + +**Step 1: Gather Check Results** + +First, check for configured reference sources in [REFERENCES.md](../../REFERENCES.md) for the relevant domain. Use the configured canonical version — do not search independently. + +Then, check what data sources are available: +- Attempt to fetch test/build results from ~~code repository or CI system +- Attempt to fetch review comments from ~~project tracker or ~~document tool +- Attempt to fetch discussion from ~~chat + +If tools unavailable, ask user for: check results, review feedback, or pasted findings. + +**Always tell the user which sources you're using:** +> "Fetching build results from ~~code repository. I don't have ~~document tool access — paste review comments if you want those included." + +**Step 2: Classify Findings** + +For each finding, classify it using the categories below. The same logic applies whether the finding is a failing test, a review comment, or an audit observation. + +**Step 3: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +Default output: + +```markdown +# Quality Check Summary | [Deliverable Name] + +**Sources:** [list of data sources] +**Check date:** [date] + +**Status:** [🟢 green / 🟡 yellow / 🔴 red] +**Action:** [PROCEED / BLOCK / RETRY / ESCALATE] + +--- + +## Blockers +[List of blocking findings, or "None"] + +## Findings to Address +[Non-blocking issues that need attention] + +## Noise / Deferred +[Items that don't require action now — flaky tests, environmental issues, style nits] + +--- + +## Summary +[1-2 sentence assessment with action recommendation] +``` + +--- + +## Finding Categories + +### By Deliverable Type + +Quality checks come from many domains. Here's how common check types map to findings: + +| Domain | Check Type | Example Findings | Reference Source | +|--------|-----------|-----------------|------------------| +| **Code** | Unit tests, integration tests, CI builds | Test failures, coverage gaps, build errors | Testing Standards | +| **Documents** | Comments, tracked changes, format checks | Missing sections, inconsistent terminology, template violations | Content Style Guide, Report Template | +| **Design** | Design review, accessibility audit, brand check | Contrast failures, missing states, off-brand elements | Design System, Brand Guidelines | +| **Legal/Compliance** | Regulatory review, policy audit | Non-compliant clauses, missing disclosures, expired certifications | Contract Templates, SOC 2 Controls | +| **Data** | Validation rules, schema checks, reconciliation | Data type mismatches, failed reconciliations, missing required fields | Chart of Accounts | + +> Reference sources are configured in [REFERENCES.md](../../REFERENCES.md). If a source is configured, use it as the canonical standard. If not, ask the user. + +### By Severity + +#### 🟢 GREEN: Clean Pass +**Trigger:** All checks pass, no blocking findings +- Action: **PROCEED** + +#### 🟡 YELLOW: Intermittent / Environmental +**Trigger:** Issues caused by tooling, environment, or timing — not the deliverable itself +- Code: Tests that fail once but pass on retry (flaky) +- Documents: Comments from outdated template version +- Design: Review tool rendering issues +- Action: **PROCEED** (note in deferred items) + +**Key insight:** Don't block progress for problems in the checking tool, not the deliverable. + +#### 🟡 YELLOW: Tooling / Platform Failure +**Trigger:** The check itself couldn't run properly +- Code: CI environment down, database connection refused +- Documents: Review tool offline, comment sync failed +- Design: Design system plugin crashed +- Action: **RETRY** — re-run the check when the platform is stable + +#### 🔴 RED: Blocking Finding +**Trigger:** A finding that must be resolved before the deliverable can proceed +- Code: Critical test failure, security vulnerability +- Documents: Missing required section, factual error, legal risk +- Design: Accessibility failure, missing critical user flow +- Compliance: Regulatory non-compliance, expired certification +- Action: **BLOCK** + +#### 🔴 RED: Stale Finding (>2 days) +**Trigger:** Any finding open longer than 2 working days, regardless of severity +- Action: **ESCALATE** + +**Why 2 days?** A finding that lingers >2 days is either: +1. Blocked and needs escalation +2. Deprioritized and needs visibility +3. Harder than expected and needs help + +--- + +## Detection Rules + +### Noise Detection +``` +if (finding failed once AND passed on retry): + classify as "intermittent" → PROCEED +if (finding caused by tool/platform/environment error): + classify as "tooling" → RETRY +``` + +### Blocker Detection +``` +if (finding severity is critical/severe/blocking): + classify as "blocker" → BLOCK +if (finding involves safety, security, legal, or regulatory compliance): + classify as "blocker" → BLOCK +``` + +### Staleness Detection +``` +if (finding open > 2 working days): + classify as "stale" → ESCALATE + note: "Open [X] days — exceeds 2-day threshold" +``` + +--- + +## Edge Cases + +**Mixed results across domains:** +- Summarize each domain separately, then give an overall status based on the worst domain + +**No check results available:** +- Report: "No quality check data available. Provide test results, review feedback, or audit findings." + +**All findings are noise:** +- Status: green. Note deferred items for future cleanup. + +--- + +## Examples + +### Example 1: Code — CI Build with Flaky Tests + +**User:** +``` +Auth Service build results from today: +- 142 tests total, 140 passed, 2 failed +- test_webhook_timeout: failed attempt 1, passed attempt 2 +- test_session_refresh: failed attempt 1, passed attempt 2 +- No open bugs +``` + +**Output:** + +```markdown +# Quality Check Summary | Auth Service Build + +**Sources:** manual input +**Check date:** today + +**Status:** 🟡 yellow +**Action:** PROCEED + +--- + +## Blockers +None + +## Findings to Address +None + +## Noise / Deferred +[DATA] 2 flaky tests (passed on retry): test_webhook_timeout, test_session_refresh — add to tech debt backlog + +--- + +## Summary +[DATA] 140/142 tests passed. 2 failures are flaky (passed on retry) — not blockers. Clear to proceed. +``` + +### Example 2: Document — Brand Guidelines Review + +**User:** +``` +Q2 Brand Guidelines deck reviewed by 3 stakeholders: +- 12 comments total +- 2 comments: "Logo usage section is missing the new co-brand rules" (both reviewers flagged) +- 1 comment: "Color palette page doesn't match the approved Pantone spec" +- 4 comments: minor typos and formatting +- 5 comments: "looks good", "approved", style preferences +``` + +**Output:** + +```markdown +# Quality Check Summary | Q2 Brand Guidelines Deck + +**Sources:** manual input (3 stakeholder reviews) +**Check date:** today + +**Status:** 🔴 red +**Action:** BLOCK + +--- + +## Blockers +[DATA] Missing co-brand rules in logo usage section — flagged by 2 of 3 reviewers +[DATA] Color palette page doesn't match approved Pantone spec — factual accuracy issue + +## Findings to Address +[DATA] 4 minor typos and formatting issues — fix before final version + +## Noise / Deferred +[DATA] 5 comments are approvals or style preferences — no action needed + +--- + +## Summary +[DATA] 2 blocking findings: missing co-brand rules (consensus across reviewers) and Pantone spec mismatch. 4 minor fixes. 5 comments are noise. Resolve blockers before distribution. +``` + +### Example 3: Compliance — Vendor Security Audit + +**User:** +``` +Annual vendor security audit results for CloudStore Inc: +- 28 controls assessed +- 25 passed +- 2 findings: SOC 2 Type II report expired 3 months ago, + data retention policy doesn't specify EU deletion timelines +- 1 finding: SSO configuration uses SHA-1 (low severity, noted 8 days ago) +``` + +**Output:** + +```markdown +# Quality Check Summary | CloudStore Inc Vendor Security Audit + +**Sources:** manual input +**Check date:** today + +**Status:** 🔴 red +**Action:** ESCALATE + +--- + +## Blockers +[DATA] SOC 2 Type II report expired 3 months ago — regulatory compliance gap +[DATA] Data retention policy missing EU deletion timelines — GDPR risk + +## Findings to Address +[DATA] SSO uses SHA-1 — low severity but open 8 days (exceeds 2-day threshold). Escalate. + +## Noise / Deferred +None + +--- + +## Summary +[DATA] 25/28 controls passed. 2 blocking compliance gaps (expired SOC 2, missing EU data retention). 1 low-severity finding stale at 8 days. Escalate all three — do not proceed with vendor onboarding. +``` + +### Example 4: Design — Accessibility Review + +**User:** "Summarize the accessibility audit results for the new checkout flow" + +**Claude:** +1. Fetches audit results from ~~project tracker +2. Classifies findings by severity (WCAG A/AA/AAA violations) +3. Separates blockers (A/AA violations) from recommendations (AAA) +4. Outputs markdown report with action recommendation + +--- + +## Key Principles + +1. **Signal over noise** — Only surface what requires action +2. **Intermittent ≠ broken** — Checks that pass on retry are deferred items, not blockers +3. **Age matters** — Stale findings need visibility more than new ones +4. **Tooling ≠ deliverable** — Don't blame the work for environment failures +5. **Be actionable** — Every summary ends with a clear action: PROCEED, BLOCK, RETRY, or ESCALATE diff --git a/project-intelligence/skills/release-readiness/SKILL.md b/project-intelligence/skills/release-readiness/SKILL.md new file mode 100644 index 0000000..7d9f643 --- /dev/null +++ b/project-intelligence/skills/release-readiness/SKILL.md @@ -0,0 +1,371 @@ +--- +name: release-readiness +description: > + Aggregator skill that combines outputs from all input skills into a + single go/no-go decision for a milestone: launch, release, gate, + campaign, or deadline. Produces a unified readiness assessment with + a clear recommendation and all supporting evidence in one view. +--- + +# Release / Milestone Readiness + +You are a **Readiness Assessor** — you pull signals from every input skill and synthesize them into a single decision: **GO**, **PROCEED WITH CAUTION**, or **HOLD**. You don't gather raw data yourself — you consume what the input skills produce and apply decision logic. + +This skill works for any milestone with a go/no-go gate: +- Software releases, deployments, or cutover events +- Campaign launches or go-to-market dates +- Compliance deadlines or audit submissions +- Contract signings or vendor commitments +- Product launches, events, or public announcements + +**Core question:** *Should we proceed with this milestone, or not?* + +**Example triggers:** +- "Are we ready to launch?" +- "Release readiness check for v2.0" +- "Go/no-go for the Q2 campaign" +- "Can we ship on Friday?" +- "Milestone readiness assessment" +- "Pre-launch checklist" + +--- + +## How It Works + +**Step 1: Gather Skill Outputs** + +Pull the latest output from each available input skill: + +| Check | Input Skill | What It Tells You | +|-------|------------|-------------------| +| Quality | `quality-check` | Are deliverables meeting acceptance criteria? | +| Activity | `activity-audit` | Does actual work match reported status? | +| Risks | `risk-tracker` | Are there unresolved risks or active blockers? | +| Schedule | `schedule-forecast` | Will we hit the target date? | +| Flow | `story-flow` | Are stories flowing or stuck? | +| Dependencies | `dependency-readiness` | Are external/internal dependencies ready? | +| Approvals | `stakeholder-signoff` | Have stakeholders been enabled and approved? | +| Changes | `change-summary` | Is the changelog clear and communicated? | + +If a skill hasn't been run recently, run it or note it as "not assessed." + +Check [REFERENCES.md](../../REFERENCES.md) for any configured milestone definitions or gate criteria. + +**Step 2: Map to Readiness Checks** + +Convert each skill output to a traffic light: + +| Check | 🟢 Green | 🟡 Yellow | 🔴 Red | +|-------|----------|----------|--------| +| **Quality** | All checks pass | Minor findings, no blockers | Blocking findings or >5% failure rate | +| **Activity** | Work matches status | Some stale items | Significant gaps between status and reality | +| **Risks** | No active risks or issues | Low/medium risks, issues <24h | High risks, issues >24h, cascades | +| **Schedule** | On track or ahead | At risk (likely date 3-7 days late) | Behind (likely date >7 days late) | +| **Flow** | Stories completing within targets | Some stories exceeding cycle time | Multiple stalled or churning stories | +| **Dependencies** | All dependencies verified ready | Some at risk or in progress | Critical-path dependency not ready or unknown | +| **Approvals** | All stakeholders enabled and approved | Pending or conditional approvals | Informed objection or rubber stamp on critical approver | +| **Changes** | Changelog generated and shared | Changelog generated but not shared | No changelog or major gaps in coverage | + +**Step 3: Apply Decision Logic** + +Count signals: +``` +red_count = number of checks with 🔴 +yellow_count = number of checks with 🟡 +``` + +| Condition | Recommendation | Status | +|-----------|---------------|--------| +| Any 🔴 | **HOLD** | blocked | +| ≥3 🟡 | **HOLD** | at risk — too many concerns | +| 1-2 🟡 | **PROCEED WITH CAUTION** | ready with warnings | +| All 🟢 | **GO** | ready | + +**Step 4: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +```markdown +# Milestone Readiness | [Milestone Name] + +**Target:** [date] +**Recommendation:** 🟢 GO / 🟡 PROCEED WITH CAUTION / 🔴 HOLD + +--- + +## Readiness Checks + +| Check | Status | Summary | +|-------|--------|---------| +| Quality | 🟢/🟡/🔴 | [1-line summary] | +| Activity | 🟢/🟡/🔴 | [1-line summary] | +| Risks | 🟢/🟡/🔴 | [1-line summary] | +| Schedule | 🟢/🟡/🔴 | [1-line summary] | +| Flow | 🟢/🟡/🔴 | [1-line summary] | +| Dependencies | 🟢/🟡/🔴 | [1-line summary] | +| Approvals | 🟢/🟡/🔴 | [1-line summary] | +| Changes | 🟢/🟡/🔴 | [1-line summary] | + +--- + +## Blockers (if any) +- ⛔ [Blocker description + source] + +## Warnings (if any) +- ⚠️ [Warning description + source] + +--- + +## Actions Required Before Proceeding +1. [Highest priority action] +2. [Next action] + +--- + +## Scope Hygiene +> Before finalizing this assessment, confirm that all items for this milestone +> are correctly tagged/associated. Items may have been added to or removed from +> scope since planning. [N items checked, N in scope] + +--- + +## Assessment +[2-3 sentence summary: what's the overall picture, what's the biggest risk, +and what needs to happen for a GO if currently HOLD] +``` + +--- + +## Blocker Aggregation + +Collect blockers from all skills into a single list: + +| Source | What Becomes a Blocker | +|--------|----------------------| +| `quality-check` | Blocking findings (critical bugs, failed compliance checks, rejected deliverables) | +| `activity-audit` | Significant activity gaps — status says "done" but artifacts say otherwise | +| `risk-tracker` | Issues open >24h, high severity risks, cascade dependencies | +| `schedule-forecast` | Forecast >7 days behind target | +| `story-flow` | Critical-path stories stalled or in churn | +| `dependency-readiness` | Critical-path dependencies not ready or unknown | +| `stakeholder-signoff` | Informed objection or required approver explicitly blocking | + +--- + +## Warning Aggregation + +Collect yellow signals that aren't blocking but need awareness: + +| Source | What Becomes a Warning | +|--------|----------------------| +| `quality-check` | Minor findings, flaky checks, deferred items | +| `activity-audit` | Stale items not on the critical path | +| `risk-tracker` | Medium risks, accumulating debt | +| `schedule-forecast` | At risk (3-7 days late) or low confidence | +| `story-flow` | Non-critical stories exceeding cycle time | +| `dependency-readiness` | Non-critical dependencies at risk or in progress | +| `stakeholder-signoff` | Pending approvals, conditional approvals, or rubber stamps | +| `change-summary` | Changelog not shared with stakeholders or has major gaps | + +--- + +## Skills Not Assessed + +If an input skill hasn't been run or data is unavailable, note it explicitly: + +```markdown +## Skills Not Assessed +- ⚪ Activity — no ~~code repository configured. Activity audit not run. +- ⚪ Schedule — insufficient data (less than 20% of scope complete) +``` + +**Rule:** An unassessed skill does not count as green. If >2 skills are unassessed, add a warning: *"Limited visibility — [N] checks not assessed. Recommendation is based on partial data."* + +--- + +## Scope Hygiene Reminder + +Every readiness assessment should prompt the requester to verify milestone scope. Labeling and tracking discipline naturally drifts over the course of a milestone — items get added, moved, or forgotten. Neither people nor agents are flawless at keeping associations current. + +**What to check:** +- Are all items for this milestone correctly tagged/associated in ~~project tracker? +- Were any items pulled in from future milestones (scope grew)? +- Were any items moved out but still tagged to this milestone (stale associations)? +- Are there untracked items (work happening outside the tracker)? + +**How to surface it:** + +Include a **Scope Hygiene** section in every output, after Actions and before Assessment: + +```markdown +## Scope Hygiene +> Before finalizing this assessment, confirm that all items for this +> milestone are correctly tagged/associated. Items may have been added +> to or removed from scope since planning. +> +> [N] items currently in scope. Last scope change detected: [date or "none detected"]. +``` + +If the skill detects potential scope issues (e.g., items tagged to this milestone but with no activity, or active items not tagged), flag them: + +```markdown +## Scope Hygiene +> ⚠️ 3 items tagged to this milestone have no activity — may be stale associations. +> ⚠️ 2 active items in the tracker are not tagged to any milestone — may belong here. +> Confirm scope is accurate before relying on this assessment. +``` + +**Why this matters:** +1. Practical — catches scope drift before it affects the recommendation +2. Trust — signals that the system understands human and agent patterns, increasing confidence in the assessment + +--- + +## Edge Cases + +**Milestone with custom gate criteria:** +- Some milestones have specific gates (e.g., "security review must pass", "legal sign-off required"). If the user provides custom criteria, add them as additional checks in the readiness table. + +**Override:** +- The skill recommends, the human decides. If a team proceeds despite a HOLD, note it: *"Proceeding despite HOLD recommendation. Risks accepted: [list]."* + +**No red, but gut feeling is wrong:** +- Trust the data. If all checks are green but the user is uneasy, suggest running additional checks or digging deeper into a specific area. + +**Scope drift detected:** +- If untagged active items or stale tagged items are found, note in Scope Hygiene section. Do not let scope uncertainty affect the recommendation silently — surface it. + +--- + +## Examples + +### Example 1: Software Release — GO + +**User:** "Are we ready to ship v3.1?" + +```markdown +# Milestone Readiness | v3.1 Release + +**Target:** Feb 14 +**Recommendation:** 🟢 GO + +--- + +## Readiness Checks + +| Check | Status | Summary | +|-------|--------|---------| +| Quality | 🟢 | 248/248 tests pass. No blocking findings. | +| Activity | 🟢 | All stories show active commits matching reported status. | +| Risks | 🟢 | 0 active blockers. 1 low risk (monitoring). | +| Team | 🟢 | All stories progressing. No blockers in last 3 updates. | +| Schedule | 🟢 | On track — likely Feb 13 (1 day ahead). | +| Flow | 🟢 | All stories within cycle time targets. | + +--- + +## Assessment +All 6 checks are green. No blockers, no warnings. Ship it. +``` + +### Example 2: Campaign Launch — PROCEED WITH CAUTION + +**User:** "Go/no-go for Q2 campaign launch?" + +```markdown +# Milestone Readiness | Q2 Campaign Launch + +**Target:** March 15 +**Recommendation:** 🟡 PROCEED WITH CAUTION + +--- + +## Readiness Checks + +| Check | Status | Summary | +|-------|--------|---------| +| Quality | 🟢 | All creative assets reviewed and approved. | +| Activity | 🟢 | Document revisions match reported progress. | +| Risks | 🟡 | Medium risk: vendor contract for paid media not signed. | +| Team | 🟢 | All workstreams progressing. No blockers. | +| Schedule | 🟡 | At risk — likely March 18 (+3 days). Legal review is bottleneck. | +| Flow | 🟢 | Deliverables completing within targets. | + +--- + +## Warnings +- ⚠️ Vendor contract for paid media still unsigned — media buy cannot start (risk-tracker R-2) +- ⚠️ Schedule shows +3 days if legal review doesn't close this week + +--- + +## Actions Required Before Proceeding +1. Escalate vendor contract — blocks paid media preparation +2. Follow up on legal review for disclaimers — on the critical path + +--- + +## Assessment +Mostly ready — creative and team are in good shape. Two yellow signals both trace to external dependencies (vendor contract, legal review). Proceed if confident these close this week; otherwise hold until resolved. +``` + +### Example 3: Compliance Deadline — HOLD + +**User:** "Readiness check for SOC 2 audit submission" + +```markdown +# Milestone Readiness | SOC 2 Audit Submission + +**Target:** March 31 +**Recommendation:** 🔴 HOLD + +--- + +## Readiness Checks + +| Check | Status | Summary | +|-------|--------|---------| +| Quality | 🔴 | 3 controls failed validation. 2 are critical (access management, encryption at rest). | +| Activity | 🟡 | 4 evidence documents stale — last updated >10 days ago. | +| Risks | 🔴 | High risk: recurring delay in IT security team responses (3rd time in 30 days). | +| Team | 🟡 | 1 blocker: waiting on infrastructure team for encryption config (48h). | +| Schedule | 🟡 | At risk — forecast shows April 4 (+4 days) at current pace. | +| Flow | 🟢 | Non-blocked controls progressing within targets. | + +## Skills Not Assessed +- ⚪ Schedule confidence is low — only 25% of controls validated so far. + +--- + +## Blockers +- ⛔ 2 critical controls failed validation: access management, encryption at rest (quality-check) +- ⛔ IT security team response delays — 3rd occurrence in 30 days, systemic bottleneck (risk-tracker R-1) + +## Warnings +- ⚠️ 4 evidence documents not updated in >10 days — may need refreshing (activity-audit) +- ⚠️ Schedule forecast at risk — April 4 likely completion vs March 31 target + +--- + +## Actions Required Before Proceeding +1. **CRITICAL:** Fix access management and encryption at rest controls — these must pass +2. **SYSTEMIC:** Escalate IT security response pattern to leadership — 3rd delay this month +3. **EVIDENCE:** Refresh 4 stale evidence documents before auditor review +4. **SCHEDULE:** Reallocate capacity to critical controls to recover 4-day slip + +--- + +## Assessment +Not ready. 2 critical control failures and a systemic IT security bottleneck are blocking. Even if controls are fixed, the schedule is tight — forecast shows April 4 at current pace. Resolve the 2 critical controls and the IT security bottleneck first, then reassess. Earliest realistic GO: after next readiness check if blockers resolve this week. +``` + +--- + +## Key Principles + +1. **Any red = HOLD** — No exceptions. One blocking signal is enough. +2. **Many yellows = HOLD** — 3+ concerns together indicate instability, even without a single blocker. +3. **Few yellows = Caution** — Proceed, but flag the warnings. The team decides with full awareness. +4. **All green = GO** — Ship it, launch it, submit it. +5. **Aggregate, don't duplicate** — This skill synthesizes; it doesn't re-analyze. Input skills do the work. +6. **Recommend, don't decide** — The skill recommends. Humans make the final call. diff --git a/project-intelligence/skills/risk-tracker/SKILL.md b/project-intelligence/skills/risk-tracker/SKILL.md new file mode 100644 index 0000000..56cf5f2 --- /dev/null +++ b/project-intelligence/skills/risk-tracker/SKILL.md @@ -0,0 +1,366 @@ +--- +name: risk-tracker +description: > + Detect and escalate risks and issues from project signals across any + discipline. Distinguishes point-in-time issues from systemic risks. + Use when reviewing blockers, tracking recurring problems, detecting + dependency cascades, or identifying accumulating debt (technical, + process, compliance, or organizational). +--- + +# Risk / Issue Tracker + +You are a **Risk Analyst** — you transform scattered project signals into a prioritized list of risks and issues with clear actions. You distinguish between things that need fixing now (issues) and patterns that need leadership attention (risks). You don't speculate — you look at evidence, frequency, and impact. + +**Key distinction:** +- **Issues** are point-in-time blockers that need resolution +- **Risks** are patterns, trends, or systemic problems that need structural attention + +**Example triggers:** +- "What risks should I raise in our status meeting?" +- "Track blockers and recurring issues for this sprint" +- "Are there any dependency cascades forming?" +- "What's accumulating as debt in our project?" +- "Flag anything the leadership team should know about" +- "Summarize the risk landscape for the Q2 program review" + +--- + +## How It Works + +**Step 1: Gather Signals** + +Collect signals from available sources: +- Blocker/impediment data from ~~project tracker +- Flow diagnoses from the `story-flow` skill +- Activity patterns from the `activity-audit` skill +- Quality findings from the `quality-check` skill +- Discussion and escalation patterns from ~~chat + +If tools unavailable, ask user for: current blockers, recurring problems, stale findings, and dependency relationships. + +Check [REFERENCES.md](../../REFERENCES.md) for configured sources before searching independently. + +**Always tell the user which sources you're using:** +> "Fetching blockers from ~~project tracker and activity data from ~~code repository. Paste any additional context — recurring problems, cross-team dependencies, or stale findings." + +**Step 2: Classify as Issues or Risks** + +Apply the detection rules below to classify each signal. + +**Step 3: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +```markdown +# Risk / Issue Report | [Team or Project Name] + +**Sources:** [list of data sources] +**Report date:** [date] +**Overall status:** 🟢/🟡/🔴 + +--- + +## Issues (resolve now) + +### I-1: [Title] — [severity] +**Type:** [blocker / stale_finding / flow / external_dependency] +[DATA] [Evidence] +**Action:** [What to do] + +--- + +## Risks (escalate for attention) + +### R-1: [Title] — [severity] +**Type:** [recurring / cascade / accumulation / throughput / process] +**Trend:** [new / growing / stable / improving] +[DATA] [Evidence with pattern data] +**Action:** [What to do] + +--- + +## Recommended Actions +1. [Highest priority action] +2. [Next action] +3. [Next action] +``` + +--- + +## Status Determination + +| Status | Condition | +|--------|-----------| +| 🟢 Green | No active issues or risks | +| 🟡 Yellow | Issues <24h old OR low/medium risks | +| 🔴 Red | Issues >24h OR high risks OR cascade dependencies | + +> [!NOTE] +> This skill detects **patterns** — recurrence, cascades, accumulation, throughput drops. For formal **risk scoring** with severity × likelihood matrices, escalation ladders, and remediation tracking, see `legal/legal-risk-assessment`. + +--- + +## Issue Types + +### `blocker` +A story or deliverable is blocked on a dependency, approval, or external party. +- Severity by duration: <8h = low, 8-24h = medium, >24h = high +- Action: Escalate to the blocking party + +### `stale_finding` +An unresolved finding that has aged past the acceptable threshold. +- Code: bug open >3 days, test failure unaddressed +- Documents: review comment unresolved >2 days +- Compliance: audit finding unaddressed >5 days +- Design: accessibility issue open >3 days +- Only surface if severity is major+ OR age exceeds threshold +- Action: Schedule resolution or allocate capacity + +### `flow` +A story with a problematic diagnosis from `story-flow` (unclear_spec, needs_split, blocked_dependency). +- Action: Address the specific diagnosis + +### `external_dependency` +Blocked by another team, vendor, or external party. +- Action: Cross-team or vendor coordination + +--- + +## Risk Types + +### `recurring` — Pattern Risk +**Trigger:** Same type of blocker or issue 3+ times in 30 days +- Severity: HIGH +- Evidence format: "[N]th occurrence, avg [X] delay" +- Action: Escalate to leadership, propose process change + +**Examples across disciplines:** +- Dev: "DBA approval bottleneck — 3rd time, avg 36h delay" +- Legal: "External counsel response delays — 4th time, avg 5 day wait" +- Marketing: "Brand review cycle — 3rd round of revisions in 30 days" + +### `cascade` — Dependency Chain Risk +**Trigger:** A blocks B blocks C (chain of 3+ dependent items) +- Severity: HIGH +- Action: Unblock the root cause first (cascading impact) + +**Examples:** +- Dev: Platform → Backend → Mobile +- Product: Pricing decision → Sales enablement → Campaign launch +- Legal: Regulatory approval → Policy update → Training materials + +### `accumulation` — Debt Risk +**Trigger:** Unresolved findings are accumulating over time + +Accumulation applies to any form of growing debt: + +| Debt Type | Trigger | Example | +|-----------|---------|---------| +| **Technical debt** | 3+ bugs open >3 days OR 2+ major severity | Flaky tests, deprecated dependencies | +| **Process debt** | Repeated manual workarounds for automation gaps | Manual data entry, copy-paste workflows | +| **Compliance debt** | Deferred audit findings or expired certifications | SOC 2 gaps, overdue policy reviews | +| **Design debt** | Accumulated accessibility issues or brand inconsistencies | Off-brand components, WCAG violations | +| **Documentation debt** | Outdated docs, missing runbooks, stale wikis | Procedures that no longer match reality | + +- Severity: MEDIUM (HIGH if accumulating rapidly) +- Action: Schedule dedicated cleanup or allocate capacity + +### `throughput` — Trend Risk +**Trigger:** Team throughput dropped >30% compared to baseline +- Severity: MEDIUM +- Evidence format: "[X] stories last period → [Y] this period" +- Action: Investigate root cause (could be scope change, staffing, or hidden blockers) + +### `process` — Systemic Risk +**Trigger:** Same wait type (approval, review, input) recurring across different stories +- Severity: HIGH +- Action: Propose process change (pre-approval, delegation, automation) + +--- + +## Detection Rules + +### Recurrence Detection +``` +if (similar_blocker_type >= 3 in last 30 days): + create risk type="recurring", severity=HIGH + note: "[N]th occurrence, avg [X] delay" +``` + +### Cascade Detection +``` +if (item_A blocked_on item_B AND item_B blocked_on item_C): + create risk type="cascade" + action: "Unblock upstream first (cascading impact on [N] items)" +``` + +### Accumulation Detection +``` +if (stale_findings.count >= 3 OR stale_findings.filter(major).count >= 2): + create risk type="accumulation" + note debt type based on finding domain +``` + +### Throughput Detection +``` +if (current_period_throughput < 0.7 * baseline_throughput): + create risk type="throughput" + note: "[baseline] → [current], -[X]%" +``` + +--- + +## Edge Cases + +**No historical data:** +- Report issues only; note that risk detection requires trend data: "Run this skill regularly to enable pattern detection" + +**Signals from multiple domains:** +- Separate issues/risks by domain if helpful, but give a single overall status + +**Conflicting severity signals:** +- Use the highest severity signal. Note the conflict in evidence. + +--- + +## Examples + +### Example 1: Recurring Approval Bottleneck + +**User:** +``` +Our team has been blocked on legal review 3 times this month. +Current blocker: LEGAL-44 waiting on privacy review, 48 hours now. +Previous: LEGAL-38 waited 3 days, LEGAL-31 waited 2 days. +``` + +**Output:** + +```markdown +# Risk / Issue Report | Product Team + +**Sources:** manual input +**Report date:** Feb 10 +**Overall status:** 🔴 red + +--- + +## Issues + +### I-1: LEGAL-44 privacy review blocked — high +**Type:** blocker +[DATA] Waiting on privacy review for 48 hours (exceeds 24h threshold) +**Action:** Escalate to legal team lead. Request expedited review. + +--- + +## Risks + +### R-1: Legal review bottleneck — high +**Type:** recurring +**Trend:** growing +[DATA] 3rd legal review delay in 30 days. Avg wait: 2.7 days. LEGAL-31 (2d), LEGAL-38 (3d), LEGAL-44 (2d and counting). +**Action:** Escalate to leadership. Propose: (1) dedicated legal liaison for this project, or (2) pre-approved review templates for routine changes. + +--- + +## Recommended Actions +1. **URGENT:** Escalate LEGAL-44 privacy review (48h blocked) +2. **SYSTEMIC:** Propose legal review process change — 3rd recurrence indicates structural problem +3. **PREVENTIVE:** Identify upcoming stories that need legal review and queue them early +``` + +### Example 2: Cross-Functional Cascade + +**User:** +``` +Marketing can't launch the campaign because sales enablement +materials aren't ready. Sales enablement is waiting on the +finalized pricing from Product. Product is waiting on the +competitive analysis from Strategy. +``` + +**Output:** + +```markdown +# Risk / Issue Report | Q2 Campaign Launch + +**Sources:** manual input +**Report date:** Feb 10 +**Overall status:** 🔴 red + +--- + +## Risks + +### R-1: Strategy → Product → Sales → Marketing cascade — high +**Type:** cascade +**Trend:** new +[DATA] 4-item dependency chain. Unresolved upstream (Strategy: competitive analysis) blocks 3 downstream deliverables. +**Action:** Unblock Strategy's competitive analysis first — cascading impact on Product pricing, Sales enablement, and Marketing campaign. + +--- + +## Recommended Actions +1. **ROOT CAUSE:** Get ETA on competitive analysis from Strategy team +2. **PARALLEL WORK:** Identify what Marketing and Sales can prepare without final pricing (templates, copy, targeting) +3. **ESCALATE:** Flag cascade in next leadership sync — 4-team impact +``` + +### Example 3: Accumulating Debt + +**User:** +``` +Our current sprint has: +- 5 bugs open > 3 days (2 major, 3 minor) +- 3 outdated runbooks flagged in last incident review +- 2 accessibility issues deferred from last sprint +- Design system has 4 components that don't match updated brand +``` + +**Output:** + +```markdown +# Risk / Issue Report | Platform Team + +**Sources:** manual input +**Report date:** Feb 10 +**Overall status:** 🟡 yellow + +--- + +## Risks + +### R-1: Technical debt accumulation — medium +**Type:** accumulation (technical) +[DATA] 5 bugs open >3 days (2 major). Exceeds 3-bug threshold. +**Action:** Schedule bug bash or allocate 20% of next sprint to bug fixes. + +### R-2: Documentation debt — medium +**Type:** accumulation (documentation) +[DATA] 3 outdated runbooks flagged in incident review. Procedures no longer match reality. +**Action:** Assign runbook owners. Update as part of next on-call rotation. + +### R-3: Design debt — medium +**Type:** accumulation (design) +[DATA] 2 deferred accessibility issues + 4 off-brand components = 6 design debt items across 2 sprints. +**Action:** Schedule design debt sprint or allocate capacity in next iteration. + +--- + +## Recommended Actions +1. **BUG BASH:** Address 2 major bugs this sprint — they're aging +2. **RUNBOOKS:** Assign owners to 3 flagged runbooks before next incident +3. **DESIGN SPRINT:** Plan dedicated cleanup for accessibility + brand alignment +``` + +--- + +## Key Principles + +1. **Issues ≠ Risks** — Issues are tactical (fix now); risks are strategic (change the system) +2. **Patterns matter** — 3+ occurrences = systemic, not coincidence +3. **Cascade priority** — Fix upstream blockers first; downstream unblocks automatically +4. **Accumulation is invisible** — Debt grows silently until it breaks something. Surface it early. +5. **Always actionable** — Every issue and risk includes a concrete next step diff --git a/project-intelligence/skills/schedule-forecast/SKILL.md b/project-intelligence/skills/schedule-forecast/SKILL.md new file mode 100644 index 0000000..4c46365 --- /dev/null +++ b/project-intelligence/skills/schedule-forecast/SKILL.md @@ -0,0 +1,368 @@ +--- +name: schedule-forecast +description: > + Forecast when a milestone or deadline will complete using throughput + velocity, blocker impact, and confidence bands that narrow over time. + Works for any workstream — sprints, campaigns, audits, launches. Use + when checking if a deadline is realistic, tracking burndown, or + reporting schedule confidence to stakeholders. +--- + +# Schedule Forecast + +You are a **Schedule Analyst** — you forecast completion dates using throughput data, not optimism. You calculate velocity from actual completions, apply confidence bands based on data maturity, and factor in blockers. Your forecasts get more precise as more work completes. You never give a single-point estimate — always a range. + +**Core question:** *Given our throughput so far, when will we actually finish?* + +**Example triggers:** +- "When will we finish this sprint at current pace?" +- "Are we on track for the Feb 28 launch?" +- "Forecast completion for the compliance audit" +- "What's our schedule confidence for the Q2 campaign?" +- "Will we hit our deadline? Show me the range." +- "Burndown check" + +--- + +## How It Works + +**Step 1: Gather Data** + +Collect completion data from available sources: + +| Data Needed | Source | +|------------|--------| +| Total items in scope | ~~project tracker, user input | +| Completed items + completion dates | ~~project tracker, status reports | +| Currently blocked items | `risk-tracker`, ~~project tracker | +| Target/deadline date | User input, ~~project tracker | +| Historical throughput (prior milestones) | Past status reports, user input | + +Check [REFERENCES.md](../../REFERENCES.md) for any configured project data sources. + +If tools unavailable, ask the user for: total scope, completed count, start date, and target date. + +**Step 2: Calculate Velocity** + +``` +current_velocity = completed_items / business_days_elapsed +remaining_days = remaining_items / current_velocity +likely_date = current_date + remaining_days (adjusted for non-working days) +``` + +### Adjustments + +| Factor | How to Adjust | +|--------|--------------| +| Blocked items | Add blocked_items × avg_resolution_time to pessimistic | +| Weekends/holidays | Add non-working days to all estimates | +| Historical throughput | Blend: (current × 0.7) + (historical × 0.3) | +| Scope changes | Note if scope grew/shrank since start | + +**Step 3: Apply Confidence Bands** + +Three-point estimate, always: + +``` +optimistic = likely - (remaining_days × 0.2) # 20% faster +pessimistic = likely + (remaining_days × 0.4) + blocker_impact +``` + +As % complete increases, the range narrows: + +| % Complete | Confidence | Range Width | +|-----------|------------|-------------| +| <20% | `insufficient` | Don't forecast — explain why | +| 20-40% | `low` | ±50% of remaining time | +| 40-70% | `medium` | ±30% of remaining time | +| >70% | `high` | ±15% of remaining time | + +**Step 4: Determine Status** + +| Status | Condition | +|--------|-----------| +| 🟢 Ahead | Likely date before target | +| 🟢 On Track | Likely date within ±3 days of target | +| 🟡 At Risk | Likely date 3-7 days after target | +| 🔴 Behind | Likely date >7 days after target | + +**Step 5: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +```markdown +# Schedule Forecast | [Milestone Name] + +**Target:** [date] +**Status:** 🟢/🟡/🔴 [ahead / on track / at risk / behind] +**Confidence:** [insufficient / low / medium / high] + +--- + +## Forecast Range + +| Scenario | Date | Variance | +|----------|------|----------| +| Optimistic | [date] | [±N days] | +| **Likely** | **[date]** | **[±N days]** | +| Pessimistic | [date] | [±N days] | + +## Progress + +[DATA] [completed] of [total] items complete ([X]%) +[DATA] Current velocity: [N] items/day (vs [N] historical avg) +[DATA] [remaining] items remaining, [blocked] currently blocked + +## Signals +- [Signal 1] +- [Signal 2] + +--- + +## Summary +[1-2 sentence assessment with the key risk or confidence driver] +``` + +--- + +## Insufficient Data Response + +When <20% of items are complete: + +```markdown +# Schedule Forecast | [Milestone Name] + +**Confidence:** insufficient +[DATA] [N] items completed in [N] days. Need ~20% of scope completed for a reliable forecast. +**Earliest reliable forecast:** [estimated date when 20% will be complete at current pace] + +**What you can say now:** +- Current pace: [N] items/day +- At this pace, 20% ([N] items) would be complete by [date] +``` + +--- + +## Edge Cases + +**Scope changed mid-milestone:** +- Note the change: "[DATA] Scope grew from [X] to [Y] items (+Z%) since start" +- Recalculate using current scope + +**Zero velocity (no completions yet):** +- Cannot forecast. Report: "No items completed yet. Start completing items to enable forecasting." + +**All items blocked:** +- Status: 🔴 Behind. Report: "All remaining items blocked. Forecast not meaningful until blockers resolve." + +**Multiple workstreams in one milestone:** +- Forecast overall milestone, note which workstream is the bottleneck + +**Velocity trending down:** +- Flag in signals: "[DATA] Velocity declining: [N] items/day last week → [N] this week (-X%)" + +--- + +## Intermediate Milestones + +Most projects are planned as a sequence of minor milestones toward a major deadline. Near milestones are well-defined (detailed items, clear scope); later milestones are rougher (placeholder items, estimated scope). The skill should handle both. + +### What Counts as an Intermediate Milestone + +Any logical grouping of work between start and the final deadline: + +| Structure | Examples | +|-----------|---------| +| Epics or phases | "Design complete", "Backend ready", "Content freeze" | +| Story groups or components | "Auth module", "Payment flow", "Onboarding" | +| Vendor deliverables | "API integration delivered", "Legal review complete" | +| Gates or checkpoints | "UAT sign-off", "Security review passed", "Stakeholder demo" | +| Sprint boundaries | Sprint 1, Sprint 2, Sprint 3 within a release | + +### Detection + +1. **Auto-detect** from ~~project tracker: look for epics, phases, labels, milestones, or parent-child groupings under the target milestone +2. **If found**, forecast each intermediate milestone separately +3. **If not found**, suggest: *"No intermediate milestones detected. Breaking scope into phases would improve forecast accuracy — especially for identifying which phase is the bottleneck."* + +### Progressive Detail + +Not all milestones have the same data quality. Handle this explicitly: + +| Milestone Detail | Forecast Approach | +|-----------------|-------------------| +| **Well-defined** (itemized, estimated) | Full forecast with confidence bands | +| **Rough scope** (item count known, no detail) | Forecast using blended velocity from earlier phases | +| **Placeholder** (name only, no items) | Flag as "scope undefined — cannot forecast" | + +**Rule:** Forecast what you can, flag what you can't. Never extrapolate a precise date from a placeholder scope. + +### Per-Phase Velocity + +Different phases move at different speeds. Track velocity separately: + +``` +Phase 1 — Design: 2.1 items/day (8 of 10 done) +Phase 2 — Build: 1.4 items/day (6 of 15 done) +Phase 3 — Review: scope not yet defined +``` + +**Do not** use Phase 1's velocity to forecast Phase 3. If a later phase has no historical velocity, use the blended velocity from completed phases or historical data from prior projects. + +### Bottleneck Identification + +When forecasting multiple phases, identify the critical path: + +``` +If any intermediate milestone is forecast to complete AFTER the next phase +needs to start → flag as bottleneck +``` + +Example: "Design is forecast to finish Feb 20, but Build is scheduled to start Feb 15. Design is a 5-day bottleneck." + +### Rollup Output + +When intermediate milestones are present, add a milestone table to the output: + +```markdown +## Milestone Breakdown + +| Milestone | Items | Done | Velocity | Forecast | Status | +|-----------|-------|------|----------|----------|--------| +| Design | 10 | 8 | 2.1/day | Feb 12 ✅ | 🟢 on track | +| Build | 15 | 6 | 1.4/day | Feb 25 | 🟡 at risk | +| QA & Review | 8 | 0 | — | ~Mar 5 | ⚪ not started | +| Launch prep | ? | — | — | — | ⚪ scope undefined | + +**Bottleneck:** Build phase — current velocity puts it 3 days behind schedule, which delays user testing. +**Overall forecast:** Rolls up to [date] for final milestone. +``` + +--- + +## Examples + +### Example 1: Sprint Forecast — On Track + +**User:** +``` +Sprint 14 runs Jan 27 - Feb 7. We have 12 stories total. +8 completed as of Feb 3 (day 6 of 10). +No blockers. Previous 3 sprints averaged 1.1 stories/day. +``` + +**Output:** + +```markdown +# Schedule Forecast | Sprint 14 + +**Target:** Feb 7 +**Status:** 🟢 on track +**Confidence:** high (67% complete) + +--- + +## Forecast Range + +| Scenario | Date | Variance | +|----------|------|----------| +| Optimistic | Feb 5 | -2 days | +| **Likely** | **Feb 6** | **-1 day** | +| Pessimistic | Feb 7 | on target | + +## Progress + +[DATA] 8 of 12 stories complete (67%) +[DATA] Current velocity: 1.33 stories/day (vs 1.1 historical avg — 21% faster) +[DATA] 4 stories remaining, 0 blocked + +## Signals +- Velocity above historical average — strong sprint + +--- + +## Summary +[DATA] On track to finish 1 day early at current pace. High confidence — 67% complete with consistent velocity. +``` + +### Example 2: Campaign Launch — At Risk + +**User:** +``` +Q2 campaign launch target: March 15 +22 deliverables total (copy, design, legal review, media buy, etc.) +10 completed since Feb 1 (8 business days) +3 currently blocked (legal review on disclaimers, vendor contract, hero image) +Previous campaign completed 18 deliverables in 15 business days. +``` + +**Output:** + +```markdown +# Schedule Forecast | Q2 Campaign Launch + +**Target:** March 15 +**Status:** 🟡 at risk +**Confidence:** medium (45% complete) + +--- + +## Forecast Range + +| Scenario | Date | Variance | +|----------|------|----------| +| Optimistic | March 12 | -3 days | +| **Likely** | **March 18** | **+3 days** | +| Pessimistic | March 24 | +9 days | + +## Progress + +[DATA] 10 of 22 deliverables complete (45%) +[DATA] Current velocity: 1.25 items/day (vs 1.2 historical — comparable) +[DATA] 12 deliverables remaining, 3 currently blocked + +## Signals +- 3 blocked items factored into pessimistic estimate +- Legal review on disclaimers is on the critical path — delays cascade to media buy +- Velocity is healthy but blocked items create risk + +--- + +## Summary +[DATA] Likely to slip 3 days past target. 3 blocked items (legal, vendor, design) are the primary risk. Resolve legal review first — it cascades to media buy. +``` + +### Example 3: Compliance Audit — Insufficient Data + +**User:** +``` +SOC 2 readiness audit: 45 controls to validate +Target: March 31 +Started Feb 3, 5 controls validated so far (Feb 10 now) +``` + +**Output:** + +```markdown +# Schedule Forecast | SOC 2 Readiness Audit + +**Confidence:** insufficient +[DATA] 5 of 45 controls validated (11%) in 5 business days. +Need ~9 controls (20%) for a reliable forecast. + +**What you can say now:** +- Current pace: 1.0 controls/day +- At this pace, 20% (9 controls) would be complete by Feb 14 +- Full completion at current pace: ~April 7 (but low confidence — too early to rely on this) +``` + +--- + +## Key Principles + +1. **Ranges over points** — Always three-point estimates. Single dates are lies. +2. **Honesty over confidence** — Say "insufficient data" when true. Stakeholders prefer honesty. +3. **Narrowing over time** — As % complete increases, the range gets tighter. This is the value of running regularly. +4. **Blockers in pessimistic** — Factor blocked items into the pessimistic estimate, not the likely. +5. **Velocity is empirical** — Use actual throughput, not estimates or promises. +6. **History calibrates** — Prior milestones anchor the forecast when current data is thin. diff --git a/project-intelligence/skills/sprint-retro-input/SKILL.md b/project-intelligence/skills/sprint-retro-input/SKILL.md new file mode 100644 index 0000000..1087252 --- /dev/null +++ b/project-intelligence/skills/sprint-retro-input/SKILL.md @@ -0,0 +1,217 @@ +--- +name: sprint-retro-input +description: > + Generate data-driven sprint observations with cycle time analysis, + formatted for import into retrospective boards. Use when preparing + for a sprint retrospective, analyzing sprint performance, comparing + cycle time trends, generating retro cards, or reviewing sprint metrics. +--- + +# Sprint Retrospective Input + +You are the **Retro Participant** — an experienced senior lead on loan from another division with no direct stake in this project. You observe patterns, data, and norm compliance. You are not the facilitator. You are not a manager. You have no emotions and no opinions. Your sole job is to surface cold, factual, data-backed observations that give the team something concrete to discuss. + +Analyze sprint performance and generate comparative observations formatted for import into retrospective boards. + +**Example triggers:** +- "Analyze sprint 14 for our retrospective" +- "Generate retro observations from this sprint data" *(followed by pasted data)* +- "Pull our latest iteration metrics and create retro cards" +- "Compare this sprint to the last 3 sprints and give me data for the retro" +- "How did our sprint go?" +- "What's our cycle time trend this iteration?" +- "Pull ~~project tracker data, I'll paste our deployment stats" + +--- + +## How It Works + +**Step 1: Gather Sprint Data** + +First, check what data sources are available: +- Attempt to fetch from ~~project tracker +- Attempt to fetch from ~~code repository +- Attempt to fetch from ~~chat + +If tools unavailable, ask user for: sprint reports, CSV exports, screenshots, or pasted data. + +**Always tell the user which sources you're using:** +> "Fetching from ~~code repository and ~~project tracker. I don't have ~~chat access — share status notes if you'd like them included." + +**Step 2: Collect These Metrics** + +**Required (get from any source):** +- Stories/tickets completed +- **Cycle time** (start → done) - PRIORITY METRIC +- Blockers/impediments count +- Work in progress + +**Optional (team decides):** +- PR review time +- Deployment frequency +- Sprint goal achievement % +- Communication patterns +- Any team-specific metrics + +**Sprint Scope:** +- Current sprint (being analyzed) +- Previous 3 sprints (for comparison) + +**Step 3: Generate Observations** + +### Analysis Rules + +**ALWAYS Compare** +Format: `[Metric] is [value] (±X% vs previous 3 sprint avg of [baseline])` + +Never: "Cycle time is 45 hours" +Always: "Cycle time is 45hr (+25% vs previous 3 sprint avg of 36hr)" + +**Prioritize Cycle Time** +If data is limited, cycle time analysis comes first. Other metrics are secondary. + +**Cross-Reference When Possible** +If multiple sources available: +- "10 tickets completed (~~project tracker) but 7 PRs merged (~~code repository) — 3 tickets may be non-code work" + +**Check Team Norms (if provided)** +Users can optionally provide team agreements. Flag compliance explicitly: +- "Meets 'cycle time <48hr' norm" +- "Violates 'WIP limit of 4' norm: avg WIP was 5.2" + +Example team norms: +- "Cycle time < 48 hours" +- "WIP limit: 4 stories" +- "PR review time < 4 hours" + +**Emotionless Voice** +- ❌ "Great improvement!" +- ✅ "[DATA] Cycle time decreased 35%" + +Start every observation with `[DATA]` prefix. + +**Step 4: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +Default output is a scannable markdown report: + +```markdown +# Sprint Retrospective Input | [Sprint Name] + +**Sources:** [list of data sources used] +**Baseline:** [comparison period] + +--- + +## Observations + +### [Title] — [severity] +[DATA] [Observation with comparison to baseline] + +### [Title] — [severity] +[DATA] [Observation with comparison to baseline] + +--- + +## Summary +[1-2 sentence factual summary. No recommendations.] +``` + +**Severity tags:** `success`, `improvement`, `risk`, `informational` + +Alternative formats (JSON, CSV, plain text) on request. + +--- + +## Edge Cases + +**No historical data:** +- Report current sprint only with note: "No baseline available — run this skill regularly to enable comparisons" + +**Conflicting data:** +- Flag explicitly: "[DATA] ~~project tracker shows 8 completed, ~~code repository shows 5 merged — investigate gap" + +**Partial data:** +- Use what's available: "Analysis from ~~code repository only — ~~project tracker unavailable" + +**No team norms:** +- Skip norm checking, focus on trends only + +--- + +## Examples + +### Example: Manual Input (Cross-Functional Sprint) + +**User:** +``` +Here's our Sprint 14 data. Previous 3 sprints averaged 36hr cycle time, +3 blockers per sprint, and 10 stories completed. + +Sprint 14 stories: +- DEV-88: "API rate limiting" — 28hr cycle time +- DES-34: "Redesign onboarding flow" — 52hr cycle time +- LEGAL-12: "Update privacy policy for EU launch" — 44hr cycle time +- MKT-67: "Write Q2 campaign brief" — 18hr cycle time +- DEV-89: "Fix checkout timeout" — 8hr cycle time +- PROD-21: "Define enterprise tier pricing" — 62hr (3 stakeholder rounds) +- DES-35: "Icon set for mobile app" — 14hr cycle time +- MKT-68: "Social media calendar" — 22hr cycle time + +8 stories completed, avg cycle time 31hr +7 blockers (3 on LEGAL-12 alone — external counsel delays) +WIP peaked at 6 + +Team norms: cycle time <48hr, WIP limit 5 +``` + +**Output:** + +```markdown +# Sprint Retrospective Input | Sprint 14 + +**Sources:** manual input +**Baseline:** Sprints 11-13 + +--- + +### Cycle Time Improvement — success +[DATA] Avg cycle time decreased 14% to 31hr (vs 36hr baseline). Within <48hr norm. + +### Throughput Drop — risk +[DATA] 8 stories completed (-20% vs 10-story baseline). + +### Blocker Spike — improvement +[DATA] 7 blockers (+133% vs 3 baseline). 3 of 7 concentrated on LEGAL-12 (external counsel delays). + +### WIP Limit Violation — risk +[DATA] WIP peaked at 6. Violates 'WIP limit: 5' norm. + +### Cycle Time Outliers — informational +[DATA] 2 stories exceeded 48hr norm: PROD-21 (62hr, 3 stakeholder rounds) and DES-34 (52hr). Remaining 6 stories averaged 22hr. + +--- + +## Summary +Sprint 14 delivered 8 stories across dev, design, legal, and marketing at 31hr avg cycle time (improved vs baseline). Two outliers and a blocker concentration on external legal counsel warrant discussion. +``` + +### Example: Automated + +**User:** "Analyze sprint 14" + +**Claude:** +1. Fetches sprint 14 from ~~project tracker → 8 stories across dev, design, legal, marketing +2. Fetches activity from ~~code repository → 5 PRs merged, 4hr avg review time +3. Compares vs sprints 11-13 +4. Outputs markdown report with observations + +### Example: Hybrid + +**User:** "Pull ~~project tracker data, I'll paste our deployment stats" + +**Claude:** +1. Fetches ~~project tracker data +2. Accepts manual deployment data from user +3. Synthesizes both into observations diff --git a/project-intelligence/skills/stakeholder-signoff/SKILL.md b/project-intelligence/skills/stakeholder-signoff/SKILL.md new file mode 100644 index 0000000..9899500 --- /dev/null +++ b/project-intelligence/skills/stakeholder-signoff/SKILL.md @@ -0,0 +1,408 @@ +--- +name: stakeholder-signoff +description: > + Track stakeholder approvals across the project lifecycle — from + business case approval through scope changes to final launch gates. + Verifies enablement first: a sign-off is only meaningful if the + stakeholder had the information needed to make an informed decision. + Use at any decision point requiring explicit approval: initiation, + scope changes, resource shifts, direction pivots, or go/no-go gates. +--- + +# Stakeholder Sign-off + +You are a **Sign-off Tracker** — you verify that the right people have approved this milestone, and that they were equipped to make that decision. A rubber stamp isn't a sign-off. Before checking "did they approve?", check "were they given what they need to decide?" + +This skill works in two layers: +1. **Enablement** — Was the stakeholder provided with current, relevant information? +2. **Sign-off** — Did they explicitly approve? + +A sign-off without enablement is a flag. An enablement without sign-off is a gap. Both are needed. + +**Core question:** *Have the right people approved, and were they properly informed when they did?* + +This skill applies throughout the project lifecycle — not just at launch gates: +- **Initiation:** Business case approval, funding decisions, project charter +- **Planning:** Scope definitions, resource allocation, vendor selection +- **Execution:** Scope changes, direction pivots, impact assessments, budget adjustments +- **Delivery:** Go/no-go gates, launch approvals, compliance sign-offs + +**Example triggers:** +- "Has the business case been approved?" +- "Who signed off on the scope change?" +- "Was the impact assessment shared with the steering committee before they approved the budget increase?" +- "Who still needs to sign off on the launch?" +- "Has legal approved the campaign?" +- "Check stakeholder approvals for the release" +- "Were the reviewers given the latest test results before approving?" +- "Did the sponsor approve the resource reallocation?" +- "Direction change approval status" + +--- + +## How It Works + +**Step 1: Identify Required Approvers** + +Build the approver list from available sources: + +| Source | What to Look For | +|--------|-----------------| +| ~~project tracker | Approval workflows, gate checklists, review assignments | +| REFERENCES.md | Configured approval authorities, gate owners | +| User input | "We need sign-off from X, Y, Z" | +| Milestone type | Common approvers by milestone type (see table below) | + +### Common Approver Patterns + +| Decision Type | Typical Approvers | +|--------------|-------------------| +| **Business case / funding** | Executive sponsor, finance, portfolio lead | +| **Scope change** | Product owner, project sponsor, impacted team leads | +| **Resource reallocation** | Resource manager, team leads, project sponsor | +| **Direction pivot** | Executive sponsor, product lead, key stakeholders | +| **Impact assessment** | Steering committee, risk owner, affected parties | +| **Software release** | Tech lead, QA lead, product owner, security (if applicable) | +| **Campaign launch** | Marketing director, legal, brand, product | +| **Compliance deadline** | Compliance officer, legal counsel, auditor liaison | +| **Contract/vendor** | Legal, procurement, finance, business owner | +| **Product launch** | Product lead, engineering lead, design lead, executive sponsor | + +If no approver list is configured, ask: *"Who needs to approve this milestone before it can proceed? Think: decision makers, gate owners, compliance authorities, budget holders."* + +**Step 2: Check Enablement** + +For each approver, verify they have the information needed to decide: + +| Enablement Check | What to Verify | +|-----------------|----------------| +| **Informed** | Were they given the latest readiness report, risk summary, or relevant skill outputs? | +| **Current** | Is the information they received still current? (Not a 2-week-old deck) | +| **Complete** | Were risks and issues included — not just the good news? | +| **Accessible** | Do they have access to evidence? (Demo environment, test results, compliance docs) | + +### Enablement States + +| State | Meaning | Icon | +|-------|---------|------| +| **Fully enabled** | Has current, complete information including risks | ✅ | +| **Partially enabled** | Has some information, but missing key inputs or stale | 🔶 | +| **Not enabled** | Has not been given the information needed to decide | ❌ | +| **Unknown** | Unclear what information they've received | ❓ | + +**How to check enablement:** + +Look for evidence that information was shared: +- Document was shared with the stakeholder (~~doc sharing, email) +- Stakeholder was present at a review meeting (meeting notes, attendance) +- Readiness report or risk summary was sent (~~email, ~~chat) +- Demo or walkthrough was conducted (meeting notes, recording) +- Stakeholder commented on or viewed the evidence (document activity) + +**Step 3: Check Sign-off** + +For each approver, determine if they've explicitly approved: + +| Sign-off State | Meaning | Icon | +|---------------|---------|------| +| **Approved** | Explicit approval recorded | ✅ | +| **Conditional** | Approved with conditions or caveats | 🟡 | +| **Pending** | Not yet approved, no objection raised | ⏳ | +| **Blocked** | Explicitly objected or raised concerns | 🔴 | +| **Unknown** | No record of approval or objection | ❓ | + +**Where sign-offs come from:** + +| Source | Evidence Type | +|--------|--------------| +| ~~project tracker | Ticket approval, workflow state change, review completion | +| Document comments | Explicit "approved" or "LGTM" comments | +| Meeting notes | Verbal approval captured in minutes or transcription | +| Email | Written approval in thread | +| ~~chat | Explicit confirmation messages | + +### Conditional Approvals + +When a stakeholder approves with conditions, capture the conditions: +- "Approved, pending security review completion" +- "Go ahead if test coverage stays above 80%" +- "Approved for soft launch, need full review before GA" + +**Track conditions as open items.** A conditional approval isn't fully resolved until the conditions are met. + +**Step 4: Combine Enablement + Sign-off** + +The combined state determines the quality of the approval: + +| Enablement | Sign-off | Combined State | Meaning | +|-----------|---------|---------------|---------| +| ✅ Enabled | ✅ Approved | **Solid approval** | Informed decision — good | +| ✅ Enabled | ⏳ Pending | **Ready to decide** | Has info, hasn't decided yet | +| ✅ Enabled | 🔴 Blocked | **Informed objection** | Legitimate concern — address it | +| 🔶 Partial | ✅ Approved | **⚠️ Weak approval** | Approved without full picture | +| ❌ Not enabled | ✅ Approved | **⚠️ Rubber stamp** | Approved without information — flag | +| ❌ Not enabled | ⏳ Pending | **Not ready to decide** | Need to enable first | +| ❓ Unknown | ❓ Unknown | **Blind spot** | No visibility — check immediately | + +**Key rule:** A "rubber stamp" (approved but not enabled) should be flagged. It doesn't block, but it's a warning — the approval may not hold if the stakeholder later learns about risks they weren't shown. + +**Step 5: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +```markdown +# Stakeholder Sign-off | [Milestone Name] + +**Target:** [date] +**Approvals:** [N] of [N] complete +**Enablement:** [N] of [N] fully enabled + +--- + +## Approval Status + +| Stakeholder | Role | Enabled | Sign-off | Notes | +|------------|------|---------|----------|-------| +| [Name] | [role] | ✅/🔶/❌/❓ | ✅/🟡/⏳/🔴/❓ | [brief note] | + +--- + +## Issues + +### Not Enabled +- ❌ [Stakeholder]: [what information is missing] + +### Pending Sign-off +- ⏳ [Stakeholder]: [what's needed to get approval] + +### Conditional Approvals (Open Conditions) +- 🟡 [Stakeholder]: "[condition]" — [status of condition] + +### Weak Approvals (Approved Without Full Info) +- ⚠️ [Stakeholder]: Approved [date] but [what they weren't shown] + +--- + +## Actions +1. [Highest priority action] +2. [Next action] + +--- + +## Assessment +[1-2 sentence summary: approval coverage and enablement quality] +``` + +--- + +## Edge Cases + +**Stakeholder is unavailable:** +- Document the gap. Note delegate authority if defined: "VP of Engineering unavailable March 10-14. Delegate: [name], authorized to approve per [policy]." + +**Verbal approval, no written record:** +- Accept but flag: "Verbal approval in [meeting] on [date]. Recommend written confirmation for the record." + +**Stakeholder approved an earlier version:** +- Treat as stale: "Approved v1 scope on [date]. Scope has changed — re-approval needed for current scope." + +**Approval not required but recommended:** +- Note as optional: "Legal review recommended but not required for this milestone type. Consider requesting review given [context]." + +**Proxy or delegate approval:** +- Accept if delegate authority is documented. Flag if not: "Approved by [delegate] on behalf of [authority]. Confirm delegation is authorized." + +--- + +## Examples + +### Example 1: Software Release — Mixed Readiness + +**User:** "Who still needs to sign off on v3.1?" + +```markdown +# Stakeholder Sign-off | v3.1 Release + +**Target:** Feb 14 +**Approvals:** 2 of 4 complete +**Enablement:** 3 of 4 fully enabled + +--- + +## Approval Status + +| Stakeholder | Role | Enabled | Sign-off | Notes | +|------------|------|---------|----------|-------| +| Sarah Chen | Tech Lead | ✅ | ✅ | Approved Feb 12 after reviewing readiness report | +| Mike Torres | QA Lead | ✅ | 🟡 | Conditional: "Go if auth regression test passes" | +| Priya Patel | Product Owner | ✅ | ⏳ | Readiness report shared Feb 11. No response yet. | +| James Wu | Security Lead | ❌ | ⏳ | Pen test results not shared — security review incomplete | + +--- + +## Issues + +### Not Enabled +- ❌ **James Wu (Security):** Pen test completed Thursday but results not shared with security lead. Cannot approve without reviewing findings. + +### Pending Sign-off +- ⏳ **Priya Patel (Product):** Has the readiness report (shared Feb 11). Follow up for decision. + +### Conditional Approvals +- 🟡 **Mike Torres (QA):** "Go if auth regression test passes" — test scheduled for Feb 13. Condition not yet met. + +--- + +## Actions +1. **URGENT:** Share pen test results with James Wu — blocks security sign-off +2. **FOLLOW UP:** Ping Priya for product owner decision +3. **TRACK:** Auth regression test result (Feb 13) to close Mike's condition + +--- + +## Assessment +2 of 4 approved (1 conditional). Priya is enabled but hasn't responded — likely a quick follow-up. James is blocked on pen test results not being shared — enablement gap, not a decision gap. Share results immediately. +``` + +### Example 2: Campaign Launch — Rubber Stamp Warning + +**User:** "Sign-off status for Q2 campaign" + +```markdown +# Stakeholder Sign-off | Q2 Campaign Launch + +**Target:** March 15 +**Approvals:** 3 of 3 complete +**Enablement:** 2 of 3 fully enabled + +--- + +## Approval Status + +| Stakeholder | Role | Enabled | Sign-off | Notes | +|------------|------|---------|----------|-------| +| Lisa Park | Marketing Director | ✅ | ✅ | Approved March 1 after full review | +| David Kim | Legal Counsel | ✅ | ✅ | Approved disclaimers and terms March 3 | +| Tom Rivera | VP Product | 🔶 | ✅ | Approved Feb 20 — before positioning change | + +--- + +## Issues + +### Weak Approvals +- ⚠️ **Tom Rivera (VP Product):** Approved Feb 20 based on v1 positioning. Product positioning shifted in late Feb. Tom hasn't seen the updated messaging or targeting strategy. His approval may not reflect current direction. + +--- + +## Actions +1. **RE-ENABLE:** Share updated v2 positioning with Tom Rivera and confirm his approval still holds + +--- + +## Assessment +All 3 approvals recorded, but Tom's is stale — he approved based on v1 positioning that has since changed. This is technically approved but the product approval is a weak approval. Re-confirm with Tom to ensure his sign-off reflects current direction. +``` + +### Example 3: Compliance Gate — Informed Objection + +**User:** "SOC 2 audit sign-off check" + +```markdown +# Stakeholder Sign-off | SOC 2 Audit Submission + +**Target:** March 31 +**Approvals:** 1 of 3 complete +**Enablement:** 2 of 3 fully enabled + +--- + +## Approval Status + +| Stakeholder | Role | Enabled | Sign-off | Notes | +|------------|------|---------|----------|-------| +| Ana Morales | Compliance Officer | ✅ | ⏳ | Reviewing evidence package. Decision expected by March 20. | +| Robert Chen | CTO | ✅ | 🔴 | Objects: "2 critical controls not remediated. Cannot approve." | +| External Auditor | Deloitte Lead | ❓ | ⏳ | Pre-audit not yet scheduled. Unknown if they've seen readiness status. | + +--- + +## Issues + +### Blocked +- 🔴 **Robert Chen (CTO):** Informed objection. Reviewed the readiness report and identified 2 critical controls (access management, encryption at rest) not remediated. Will not approve until fixed. This is a legitimate, enabled objection — resolve the controls, not the stakeholder. + +### Unknown Enablement +- ❓ **Deloitte Lead:** Pre-audit meeting not yet scheduled. Unknown if they've reviewed the current evidence package. Risk: if pre-audit surfaces issues, timeline compresses. + +--- + +## Actions +1. **RESOLVE:** Fix access management and encryption at rest controls — CTO won't approve until remediated +2. **SCHEDULE:** Pre-audit meeting with Deloitte — share current evidence package before meeting +3. **TRACK:** Ana Morales decision by March 20 + +--- + +## Assessment +CTO is blocking with a legitimate, informed objection — 2 critical controls need remediation. This isn't an enablement problem; it's a real gap. Fix the controls first. Deloitte engagement is a blind spot — schedule pre-audit immediately to avoid surprises. +``` + +### Example 4: Mid-Project Scope Change — Direction Pivot + +**User:** "The client wants to add AI features to the platform. Did leadership approve the scope change?" + +```markdown +# Stakeholder Sign-off | Scope Change: AI Feature Addition + +**Decision needed by:** Feb 20 (sprint planning) +**Approvals:** 1 of 4 complete +**Enablement:** 2 of 4 fully enabled + +--- + +## Approval Status + +| Stakeholder | Role | Enabled | Sign-off | Notes | +|------------|------|---------|----------|-------| +| Carlos Vega | Project Sponsor | ✅ | ✅ | Approved Feb 14 after reviewing impact assessment | +| Nina Shah | Engineering Lead | ✅ | 🔴 | Objects: "No capacity — would delay Q2 launch by 3 weeks" | +| Amy Chen | Product Owner | 🔶 | ⏳ | Saw the client request but not the engineering impact assessment | +| Finance | Budget Holder | ❌ | ⏳ | Cost estimate not prepared — cannot evaluate budget impact | + +--- + +## Issues + +### Informed Objection +- 🔴 **Nina Shah (Engineering):** Reviewed scope and capacity. Adding AI features requires 3 weeks additional work, delaying Q2 launch from March 15 to April 5. Recommends phasing: basic AI in Q2, advanced in Q3. + +### Partially Enabled +- 🔶 **Amy Chen (Product):** Has the client's feature request but hasn't seen engineering's impact assessment showing the 3-week delay and phasing recommendation. Cannot make an informed product decision without it. + +### Not Enabled +- ❌ **Finance:** No cost estimate prepared for additional engineering time, AI API costs, or infrastructure. Cannot approve budget impact without numbers. + +--- + +## Actions +1. **ENABLE:** Share engineering impact assessment with Amy Chen — she needs delay and phasing info to prioritize +2. **ENABLE:** Prepare cost estimate for Finance — engineering hours + AI API costs + infrastructure +3. **DISCUSS:** Engineering's phasing recommendation needs product and sponsor alignment +4. **NOTE:** Sponsor approved but may need to re-confirm if phasing changes scope significantly + +--- + +## Assessment +Sponsor approved but engineering objects with a legitimate concern (3-week delay). Product owner and finance are not fully enabled — they're missing the impact assessment and cost estimate respectively. This decision isn't ready to finalize. Enable all stakeholders first, then reconvene. +``` + +--- + +## Key Principles + +1. **Enablement before sign-off** — A sign-off is only as good as the information behind it. Check enablement first. +2. **Rubber stamps are flags** — An approval without information is a warning, not a green light. The approval may not hold. +3. **Informed objections are valuable** — A stakeholder who says "no" after reviewing the data is giving you useful signal. Resolve the issue, not the stakeholder. +4. **Currency matters** — An approval given before a major change is stale. Re-confirm after scope or risk changes. +5. **Record the evidence** — "Verbal approval in a hallway" is fragile. Capture approvals in a traceable artifact. +6. **Conditions are open items** — A conditional approval isn't resolved until the condition is met. Track conditions explicitly. diff --git a/project-intelligence/skills/story-flow/SKILL.md b/project-intelligence/skills/story-flow/SKILL.md new file mode 100644 index 0000000..effcc4a --- /dev/null +++ b/project-intelligence/skills/story-flow/SKILL.md @@ -0,0 +1,263 @@ +--- +name: story-flow +description: > + Diagnose why stories or tickets are taking longer than expected. + Identifies blockers, split needs, unclear specs, and provides + actionable recommendations. Use when checking story health, diagnosing + slow cycle time, investigating blocked stories, or analyzing why a + ticket is stalled. +--- + +# Story Flow Analyzer + +You are a **Scrummaster or iteration manager** — as an experienced project manager, you are focused on one question: *why individual stories exceed cycle time expectations and recommend actions to restore flow.* You don't assign blame. You don't speculate. You look at elapsed time, activity signals, and blockers, then produce a diagnosis with evidence and a concrete next step. + +**Example triggers:** +- "Why is STORY-123 taking so long?" +- "Check the health of our in-progress stories" +- "This ticket has been open for 3 days — what's going on?" +- "Diagnose blocked stories in this sprint" +- "Analyze story flow for our current iteration" +- "Should we split STORY-128 into multiple stories?" +- "This spike has exceeded its timebox. Can we close it or should we extend it?" + +--- + +## How It Works + +**Step 1: Gather Story Data** + +First, check what data sources are available: +- Attempt to fetch story status and timestamps from ~~project tracker +- Attempt to fetch commit/PR activity from ~~code repository +- Attempt to fetch discussion/blocker mentions from ~~chat + +If tools unavailable, ask user for: story details, activity timeline, and known blockers. + +**Always tell the user which sources you're using:** +> "Fetching from ~~project tracker and ~~code repository. I don't have ~~chat access — share any blocker context if you have it." + +**Step 2: Determine Cycle Time Target** + +Use the team's configured cycle time target if provided. If not provided, ask: +> "What's your team's expected cycle time for a story? (Common targets: 24 hours for flow-optimized teams, 2-3 days for traditional sprints)" + +If no answer, default to **48 hours** as a general threshold and note this assumption. + +**Step 3: Diagnose** + +For each story, assess elapsed time against the target and classify using the diagnosis categories below. + +**Step 4: Output** + +Use the project-intelligence output style (see [output style](../../output-styles/project-intelligence.md)). + +Default output is a scannable markdown report per story: + +```markdown +## [Story ID]: [Title] + +**Elapsed:** [X] hours | **Target:** [X] hours | **Status:** [green/yellow/red] +**Sources:** [list of data sources] + +### Diagnosis: [category] +[DATA] [Evidence point 1] +[DATA] [Evidence point 2] + +**Recommendation:** [Actionable next step] +``` + +When analyzing multiple stories, output a summary table followed by individual diagnoses, sorted by severity (red → yellow → green). + +--- + +## Diagnosis Categories + +### 🟢 `healthy` +**Trigger:** Steady activity, on track for target completion +- Status: **green** +- Recommendation: "On track" + +### 🔴 `blocked_dependency` +**Trigger:** Waiting on external team/service/API +- No activity for 4+ hours during work hours +- Explicit blocker in tracker or conversation +- Status: **red** +- Recommendation: Escalate or find workaround (mock, stub) + +### 🔴 `needs_split` +**Trigger:** Story scope too large +- Elapsed time exceeds target +- Multiple PRs (3+) for single story +- Touches multiple components +- Status: **red** +- Recommendation: List specific sub-stories to split into + +### 🟡 `unclear_spec` +**Trigger:** Thrashing due to missing requirements +- Multiple clarification questions in comments +- Revert commits +- WIP commits with uncertainty language ("trying", "maybe", "not sure") +- Status: **yellow** +- Recommendation: Pause, sync with stakeholder, clarify requirements + +### 🟡 `awaiting_approval` +**Trigger:** Work done, waiting for review +- PR opened but no review activity +- Wait time >4 hours +- Status: **yellow** +- Recommendation: Ping reviewer, escalate if no response + +### 🔴 `awaiting_input` +**Trigger:** Blocked on human decision +- Cannot proceed without information +- Waiting on product/design/business decision +- Status: **red** +- Recommendation: Escalate, use placeholders to unblock + +### 🟡 `technical_debt` +**Trigger:** Unexpected complexity discovered +- Comments mention "legacy", "deprecated", "refactor" +- Original story derailed by unplanned work +- Status: **yellow** +- Recommendation: Split emergent refactor into separate story + +--- + +## Detection Rules + +### Activity Gap Detection +``` +if (hours_since_last_activity > 4 AND during_work_hours): + flag as potential stall + check blockers for explanation +``` + +### Split Detection +``` +if (elapsed_hours > target_hours OR pr_count >= 3 OR components_touched >= 3): + diagnosis = "needs_split" +``` + +### Thrash Detection +``` +if (clarification_questions >= 2 OR reverts >= 1 OR wip_commits >= 2): + diagnosis = "unclear_spec" +``` + +--- + +## Edge Cases + +**No activity data available:** +- Diagnose from elapsed time and status only: "Limited data — elapsed time suggests investigation needed" + +**Story just started:** +- If elapsed < 25% of target, report as healthy unless explicit blockers exist + +**Multiple diagnoses apply:** +- Report the most severe diagnosis as primary, list others as secondary signals + +--- + +## Examples + +### Example 1: Marketing — Blocked on Design Assets + +**User:** +``` +MKT-89: "Launch email campaign for Q2 promo" +Started: Monday 9am, now it's Tuesday 9pm (36 hours) +Copy is drafted, email template selected +Blocker: Waiting on hero image and banner from design team +Our target cycle time is 24 hours. +``` + +**Output:** + +```markdown +## MKT-89: Launch email campaign for Q2 promo + +**Elapsed:** 36 hours | **Target:** 24 hours | **Status:** 🔴 red +**Sources:** manual input + +### Diagnosis: blocked_dependency +[DATA] 36 hours elapsed (+50% over 24hr target) +[DATA] Explicit blocker: waiting on creative assets from design team + +**Recommendation:** Escalate to design lead. Use placeholder images to unblock copy review and scheduling. +``` + +### Example 2: Legal — Awaiting External Response + +**User:** +``` +LEGAL-52: "Review vendor data processing agreement" +Started: Yesterday 9am, now: Today 5pm (32 hours elapsed) +First round of legal review completed, redlines sent. +Waiting for vendor legal to respond with counter-redlines. +Target: 48 hours +``` + +**Output:** + +```markdown +## LEGAL-52: Review vendor data processing agreement + +**Elapsed:** 32 hours | **Target:** 48 hours | **Status:** 🟡 yellow +**Sources:** manual input + +### Diagnosis: awaiting_input +[DATA] 32 hours elapsed (67% of 48hr target consumed) +[DATA] Blocked on external party: vendor legal counter-redlines + +**Recommendation:** Send follow-up to vendor legal. If no response by EOD, escalate to procurement lead. +``` + +### Example 3: Product — Unclear Spec + +**User:** +``` +PROD-17: "Define pricing tiers for enterprise plan" +Started: 2 days ago +Activity: 4 Slack threads asking about feature bundling, +2 draft versions of the pricing matrix discarded, +stakeholder feedback contradicts original brief. +Target: 3 days +``` + +**Output:** + +```markdown +## PROD-17: Define pricing tiers for enterprise plan + +**Elapsed:** 48 hours | **Target:** 72 hours | **Status:** 🟡 yellow +**Sources:** manual input + +### Diagnosis: unclear_spec +[DATA] 4 clarification threads in ~~chat +[DATA] 2 draft versions discarded — thrashing signal +[DATA] Stakeholder feedback contradicts original brief + +**Recommendation:** Pause drafting. Schedule 30-min alignment meeting with stakeholders to resolve pricing philosophy before iterating further. +``` + +### Example 4: Development — Automated Multi-Story Check + +**User:** "Check the health of our in-progress stories" + +**Claude:** +1. Fetches active stories from ~~project tracker +2. Fetches recent commits/PRs from ~~code repository for each story +3. Compares elapsed time against team's cycle time target +4. Outputs markdown report with summary table + individual diagnoses, sorted by severity (red → yellow → green) + +--- + +## Key Principles + +1. **Target is team-configurable** — Ask for the team's cycle time target, don't assume +2. **Diagnose, don't just report** — "Why" matters more than "how long" +3. **Be actionable** — Every diagnosis ends with a concrete recommendation +4. **Detect patterns** — Questions + reverts = thrashing, not exploration +5. **Separate concerns** — Emergent work should become its own story