diff --git a/.env.example b/.env.example index adc64a5..c24f2f9 100644 --- a/.env.example +++ b/.env.example @@ -9,6 +9,21 @@ OPENCLAW_GATEWAY_URL=http://host.docker.internal:18789 OPENCLAW_TOKEN=your-gateway-token-here TASKBOARD_API_KEY=your-api-key-here +# Agent configuration +# Auto-detect agents from OpenClaw API at startup (default: true) +AGENT_AUTO_DETECT=true +# Manual agent list (overrides auto-detect). Format: agent_id:Display Name +# AGENTS=main:Jarvis,architect:Architect,security-auditor:Security Auditor,code-reviewer:Code Reviewer + +# Public URL for CORS and agent prompts (change if accessing via domain/proxy) +TASKBOARD_BASE_URL=http://localhost:8080 + +# Auto-stop agent sessions when a task moves to Done (default: false) +AUTO_STOP_ON_DONE=false + +# Allow access from specific IPs (comma-separated, leave empty for localhost only) +ALLOWED_IPS= + # ============================================================================= # PROJECT CONFIGURATION # ============================================================================= diff --git a/CHANGELOG.md b/CHANGELOG.md index dd8ebd1..279b468 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,95 @@ All notable changes to this project will be documented in this file. +## [2.0.0] - 2026-02-14 + +### Architecture — Backend Refactor + +- **Monolith → Package**: Refactored `app.py` (2224 lines) + `app.py.bak` (1976 lines) into modular `app/` package (15 modules, ~2800 lines) +- **Write-safe database layer**: Global write lock with `BEGIN IMMEDIATE`, WAL journal mode, 30s busy timeout for concurrent reads +- **Request logging middleware**: Batched 0.5s windows, groups by method:pattern, shows count/avg duration/errors +- **IP restriction middleware**: Configurable allowed IPs (localhost + Docker networks + env-based) +- **Dockerfile**: Updated to `uvicorn app.main:app`, copies `app/` directory + +### Added — Multi-Project Support + +- **Projects CRUD**: `POST /api/projects`, `DELETE /api/projects/{id}` with slug generation and duplicate check +- **Project switcher**: Dropdown in header to filter tasks by project; "All Projects" shows colored badges on cards +- **Project manager modal**: Add/remove projects with name, color picker, description +- **Task assignment**: `project_id` field on tasks (default: 1 = "Default"), project dropdown in task form + +### Added — Filtering & UI + +- **Filter bar**: Combined priority, agent, and keyword search filters (client-side AND logic) +- **"Todo" status column**: New column between Backlog and In Progress (6 columns total: Backlog → Todo → In Progress → Review → Done → Blocked) +- **Markdown export**: "Export MD" button in task modal — exports title, metadata table, description, action items (as checklist), and comments as downloadable `.md` +- **Auto-save task fields**: Status, priority, agent, project, due date, and description auto-save on change/blur — no more manual save for field updates +- **Inline action item creation**: "+ Add" button in action items section with type picker (question/blocker/completion) and inline text input +- **Improved task modal layout**: Larger modal (720×700px), 8-row description textarea, project field moved to first row, form fields split into two rows +- **Better save feedback**: Longer glow animation (2.5s), green background pulse, white icon with glow filter +- **Resolved action items**: Green checkmark (#10b981) instead of muted gray +- **Project switcher moved to filter bar**: Cleaner header with "Projects" button for project manager + +### Added — 
Agent Management + +- **Dynamic agent detection**: Three-tier priority: `AGENTS` env var → OpenClaw API auto-detect → hardcoded fallback +- **Agent metadata from API**: Icons, colors, descriptions fetched from OpenClaw at startup; dynamic CSS injection for agent tags +- **`AUTO_STOP_ON_DONE`**: Configurable via `.env` (default: `true`). When enabled, sessions are permanently deleted on Done or Stop. Set `false` to use OpenClaw's `archiveAfterMinutes` instead. +- **Session liveness check**: `GET /api/tasks/{id}/agent-status` — checks if OpenClaw session is alive +- **Toggle start/stop button**: Replaces old stop-only button; shows ▶ (green) / ■ (red) based on state +- **Double-spawn guard**: `_spawning_tasks` set prevents concurrent spawns for same task +- **Follow-up spawning**: `spawn_followup_session()` with last 5 comments as context +- **@Mention spawning**: `spawn_mentioned_agent()` with cleanup=delete + +### Added — Session Management + +- **WebSocket RPC**: `_ws_rpc()` for OpenClaw gateway communication (challenge → connect → request → response) +- **Session endpoints**: `POST /api/sessions/create`, `POST /api/sessions/{key}/stop`, `POST /api/sessions/{key}/delete`, `POST /api/sessions/stop-all` +- **Session list**: Sorted main-first, then by updatedAt + +### Added — Validation & Security + +- **Pydantic field validators**: Status/priority validated against `VALID_STATUSES`/`VALID_PRIORITIES`, returns HTTP 422 on invalid values +- **Model validation**: Comment content size limit, agent name length limit, color pattern validation on projects +- **Agent guardrails**: Filesystem boundaries, forbidden actions, compliance context, escalation chain, report format template + +### Added — Database Schema + +- `tasks.working_agent` (TEXT) — currently active agent +- `tasks.agent_session_key` (TEXT) — OpenClaw session key +- `tasks.source_file` (TEXT) — source file reference +- `tasks.source_ref` (TEXT) — source ref +- `tasks.project_id` (INTEGER, FK → projects) — project assignment +- `action_items.archived` (INTEGER) — archive support +- `chat_messages.session_key` (TEXT) — session isolation for chat history +- `projects` table — multi-project support + +### Added — Environment Variables + +- `AGENT_AUTO_DETECT` — Auto-detect agents from OpenClaw API (default: `true`) +- `AGENTS` — Manual agent list, format: `agent_id:Name,...` (overrides auto-detect) +- `AUTO_STOP_ON_DONE` — Auto-kill sessions on Done (default: `true`) +- `TASKBOARD_BASE_URL` — Public URL for CORS/agent prompts (default: `http://localhost:8080`) +- `ALLOWED_IPS` — Additional allowed IPs (comma-separated) +- `PROJECT_NAME`, `COMPANY_NAME`, `COMPANY_CONTEXT` — Agent prompt context +- `ALLOWED_PATHS` — Filesystem boundaries for agents +- `COMPLIANCE_FRAMEWORKS` — Compliance context for security auditor + +### Changed + +- **Board layout**: Column flex from `1 0 300px` to `1 0 250px`, board height accounts for filter bar +- **Agent colors**: Removed hardcoded CSS classes, now dynamically injected from `config.agentMeta` +- **Agent legend**: Dynamically populated from API metadata instead of hardcoded list +- **Task loading**: Removed server-side agent filter (`?agent=`), all filtering now client-side +- **Help modal**: Agent list dynamically generated from `config.agentMeta` +- **Action items**: Added archive/unarchive support (`POST /api/action-items/{id}/archive|unarchive`) +- **Chat history**: Filtered by `session_key` parameter +- **Comment posting**: Auto-clears `working_agent` when agent posts; builds 5-comment context 
for follow-ups + +### Fixed + +- **Status enforcement**: Backend rejects invalid statuses/priorities with HTTP 422 instead of silently accepting + ## [1.3.0] - 2026-02-03 ### Added diff --git a/Dockerfile b/Dockerfile index eb2c9f6..83f56b1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ COPY requirements.txt . RUN pip install --no-cache-dir -r requirements.txt # Copy app -COPY app.py . +COPY app/ app/ COPY static/ static/ # Data volume for SQLite @@ -15,4 +15,6 @@ VOLUME /app/data # Run EXPOSE 8080 -CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"] +# Disable uvicorn access logs (we have our own middleware logging) +# Set log-level to warning to reduce noise, but keep app logs visible +CMD ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8080", "--log-level", "info", "--no-access-log"] diff --git a/OPENCLAW_SETUP.md b/OPENCLAW_SETUP.md index bef9c66..a0f31d6 100644 --- a/OPENCLAW_SETUP.md +++ b/OPENCLAW_SETUP.md @@ -114,19 +114,23 @@ The task board maps display names to OPENCLAW agent IDs: | Code Reviewer | `code-reviewer` | | UX Manager | `ux-manager` | -### Customizing Agent IDs +### Customizing Agents -If your OPENCLAW uses different agent IDs, edit `app.py`: +Agents are auto-detected from OpenClaw at startup. To override, use the `AGENTS` environment variable: -```python -AGENT_TO_OPENCLAW_ID = { - "OpenClaw": "main", - "Architect": "your-architect-id", - "Security Auditor": "your-security-id", - # ... etc -} +```env +# Format: agent_id:Display Name (comma-separated) +AGENTS=main:Jarvis,architect:Architect,security-auditor:Security Auditor,code-reviewer:Code Reviewer,ux-manager:UX Manager + +# Disable auto-detection (use AGENTS env or fallback defaults) +AGENT_AUTO_DETECT=false ``` +Priority order: +1. `AGENTS` env var (if set, always used) +2. Auto-detect from OpenClaw API (if `AGENT_AUTO_DETECT=true` and `OPENCLAW_TOKEN` set) +3. Hardcoded fallback defaults + --- ## Command Bar Setup (Two-Way Chat) @@ -180,7 +184,7 @@ COMMUNICATION: - Move to Review when done ``` -You can customize guardrails in `app.py` → `AGENT_GUARDRAILS`. +You can customize guardrails in `app/openclaw.py` → `get_agent_guardrails()`. 
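As a reference for the "Customizing Agents" priority order above (`AGENTS` env var → OpenClaw API auto-detect → hardcoded fallback), here is a minimal sketch of how that resolution could be implemented. This is illustrative only, not the actual `app/config.py` code — the function name, the fallback dict contents, and the injected `fetch_from_api` callable are assumptions:

```python
import os

# Illustrative subset of the hardcoded fallback defaults (tier 3)
FALLBACK_AGENTS = {
    "main": "Jarvis",
    "architect": "Architect",
    "security-auditor": "Security Auditor",
}

def resolve_agents(fetch_from_api) -> dict:
    """Resolve agent_id -> display name using the documented priority order."""
    # Tier 1: an explicit AGENTS env var always wins
    # Format: agent_id:Display Name, comma-separated
    raw = os.getenv("AGENTS", "").strip()
    if raw:
        pairs = (entry.split(":", 1) for entry in raw.split(",") if ":" in entry)
        return {agent_id.strip(): name.strip() for agent_id, name in pairs}

    # Tier 2: auto-detect from the OpenClaw API when enabled and a token is configured
    if os.getenv("AGENT_AUTO_DETECT", "true").lower() == "true" and os.getenv("OPENCLAW_TOKEN"):
        detected = fetch_from_api()  # caller supplies the actual API lookup
        if detected:
            return detected

    # Tier 3: hardcoded fallback defaults
    return FALLBACK_AGENTS
```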
--- diff --git a/README.md b/README.md index 0c0c49d..33e818e 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,32 @@ https://github.com/user-attachments/assets/fc4237c9-c4e5-437d-8638-1a51e8eb6219 ## 📋 Changelog +### v2.0.0 (2026-02-14) — Major Refactor + +#### ✨ New Features +- **Multi-Project Support** — Create, switch between, and manage multiple projects with colored badges +- **Project Manager Modal** — Add/remove projects with custom name, color, and description +- **"Todo" Status Column** — New column between Backlog and In Progress (6 columns total) +- **Advanced Filter Bar** — Combine Priority, Agent, and keyword search filters (client-side AND logic) +- **Markdown Export** — Export task details, action items, and comments as downloadable `.md` file +- **Configurable Agent Detection** — `AGENTS` env var for manual config, `AGENT_AUTO_DETECT` toggle +- **Responsive Design** — CSS media queries for tablet (768px) and mobile (480px) +- **Dockerized & CORS-Configurable** — `TASKBOARD_BASE_URL` for proxy/domain deployments, IP restriction middleware +- **Status Validation** — Backend rejects invalid statuses/priorities with HTTP 422 +- **Auto-Save Task Fields** — Status, priority, agent, project, due date auto-save on change +- **Inline Action Item Creation** — "+ Add" button with type picker directly in task modal + +#### 🏗️ Backend Refactor +- Refactored monolithic `app.py` (2600 lines) into modular `app/` package +- New structure: `config.py`, `database.py`, `models.py`, `websocket.py`, `openclaw.py`, `routes/` +- Agent session hardening: double-spawn guard, liveness checks, configurable auto-stop on Done + +#### 🔧 Improvements +- Dockerfile updated to use `app/` package with `uvicorn app.main:app` +- Task form now includes Project dropdown +- Agent icons and colors auto-assigned from OpenClaw API +- Session deletion via WebSocket RPC (removed legacy filesystem manipulation) + ### v1.6.0 (2026-02-03) #### ✨ New Features @@ -52,18 +78,24 @@ https://github.com/user-attachments/assets/fc4237c9-c4e5-437d-8638-1a51e8eb6219 - Fixed thinking indicator not clearing when agent finishes work - Fixed duplicate agent spawns when moving cards already being worked on +See [CHANGELOG.md](CHANGELOG.md) for full version history. + --- ## ✨ Features ### 🎯 Core Functionality -- **Live Kanban Board** — Real-time updates via WebSocket +- **Live Kanban Board** — Real-time updates via WebSocket (6 columns) - **Multi-Agent Support** — Assign tasks to different AI agents +- **Multi-Project Support** — Organize tasks across multiple projects with color-coded badges - **Auto-Spawn Sessions** — Agents automatically activate when tasks move to "In Progress" - **Persistent Conversations** — Back-and-forth chat with agents on each task - **Session Isolation** — Each agent maintains separate context per task -### 🤖 AI Agents (Configurable via .env) +### 🤖 AI Agents (Auto-Detected or Configurable) + +Agents are auto-detected from your OpenClaw instance at startup. Built-in defaults: + | Icon | Agent | Focus | |------|-------|-------| | 🤖 | Main Agent | Coordinator, command bar chat (name configurable) | @@ -72,18 +104,27 @@ https://github.com/user-attachments/assets/fc4237c9-c4e5-437d-8638-1a51e8eb6219 | 📋 | Code Reviewer | Code quality, best practices | | 🎨 | UX Manager | User flows, UI consistency | +Custom agents get auto-assigned icons and colors. Override via `AGENTS` env var. 
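The exact assignment scheme lives in the backend config and is driven by OpenClaw metadata; as a rough illustration of one way stable per-agent colors could be derived for custom agents, here is a sketch. This is an assumption, not the project's actual algorithm — the palette and the hash-based selection are made up for the example:

```python
import hashlib

# Illustrative palette only — real colors come from OpenClaw metadata / app config
PALETTE = ["#60a5fa", "#f472b6", "#34d399", "#fbbf24", "#a78bfa", "#f87171"]

def color_for_agent(agent_id: str) -> str:
    """Pick a stable, deterministic color for an agent id with no predefined color."""
    digest = hashlib.sha1(agent_id.encode("utf-8")).hexdigest()
    return PALETTE[int(digest, 16) % len(PALETTE)]
```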
+ +### 📊 Filtering & Organization +- **Project Switcher** — Filter by project; "All Projects" shows colored badges on cards +- **Filter Bar** — Combine priority, agent, and keyword search (client-side AND logic) +- **Markdown Export** — Export any task as `.md` with metadata, action items, and comments + ### 💬 Communication - **Command Bar** — Direct chat with your main agent from the header - **@Mentions** — Tag agents into any task conversation -- **Action Items** — Questions, blockers, and completion tracking +- **Action Items** — Questions, blockers, and completion tracking with notification bubbles - **File Attachments** — Paste images or attach documents ### 🔒 Security - API key authentication for sensitive endpoints -- Secrets stored in environment variables -- CORS restricted to localhost -- Input validation and size limits -- Agent guardrails (filesystem boundaries, forbidden actions) +- IP-based access restriction (localhost + configurable via `ALLOWED_IPS`) +- CORS restricted to `TASKBOARD_BASE_URL` and localhost variants +- Input validation, size limits, and Pydantic field validators +- Agent guardrails (filesystem boundaries, forbidden actions, escalation chain) + +--- ## 🚀 Quick Start @@ -126,9 +167,9 @@ The easiest way to set up the task board is to **ask your OpenClaw agent to do i Once the task board is running, prompt your OpenClaw agent: ``` -I have the task board running at http://localhost:8080. -Please onboard it as a channel plugin so you can receive -messages from the command bar and spawn sub-agents when +I have the task board running at http://:8080. +Please onboard it as a channel plugin so you can receive +messages from the command bar and spawn sub-agents when tasks move to "In Progress". ``` @@ -142,8 +183,8 @@ Your agent will: To set up the multi-agent dev team, prompt your agent: ``` -I want to set up the dev team sub-agents (Architect, Security Auditor, -Code Reviewer, UX Manager). Please configure them in OpenClaw so they +I want to set up the dev team sub-agents (Architect, Security Auditor, +Code Reviewer, UX Manager). Please configure them in OpenClaw so they can be spawned from the task board. ``` @@ -251,6 +292,16 @@ Copy `.env.example` to `.env` and customize: | `OPENCLAW_GATEWAY_URL` | OpenClaw gateway URL | For AI features | | `OPENCLAW_TOKEN` | OpenClaw API token | For AI features | | `TASKBOARD_API_KEY` | API key for protected endpoints | Recommended | +| `TASKBOARD_BASE_URL` | Public URL for CORS and agent prompts | No (default: `http://localhost:8080`) | + +#### Agent Configuration + +| Variable | Description | Default | +|----------|-------------|---------| +| `AGENT_AUTO_DETECT` | Auto-detect agents from OpenClaw API at startup | `true` | +| `AGENTS` | Manual agent list (overrides auto-detect). Format: `agent_id:Name,...` | — | +| `AUTO_STOP_ON_DONE` | Auto-kill agent sessions when task moves to Done. When enabled, sessions are **permanently deleted** (not archived). Set to `false` to keep sessions alive — use OpenClaw's `agents.defaults.subagents.archiveAfterMinutes` for graceful archival instead. | `true` | +| `ALLOWED_IPS` | Allow access from specific IPs (comma-separated) | localhost only | #### Project Configuration @@ -291,16 +342,17 @@ The task board will auto-spawn agent sessions when tasks move to "In Progress". ## 📋 Workflow ``` -Backlog → In Progress → Review → Done - ↓ - Blocked +Backlog → Todo → In Progress → Review → Done + ↓ + Blocked ``` -1. **Backlog** — Tasks waiting to be started -2. 
**In Progress** — Agent session auto-spawns, work begins -3. **Review** — Agent completed work, awaiting approval -4. **Done** — Human approval required (cannot be set by agents) -5. **Blocked** — Waiting on external input +1. **Backlog** — Tasks waiting to be triaged +2. **Todo** — Triaged, ready to be picked up +3. **In Progress** — Agent session auto-spawns, work begins +4. **Review** — Agent completed work, awaiting approval +5. **Done** — Approved and complete (agent session killed if `AUTO_STOP_ON_DONE=true`) +6. **Blocked** — Waiting on external input --- @@ -364,18 +416,40 @@ Action items track **what needs attention** with notification bubbles on cards: ## 🏗️ Architecture ``` -┌─────────────────────────────────────────────────────────┐ -│ Task Board UI │ -│ WebSocket ←→ FastAPI Backend ←→ SQLite │ -└─────────────────────────┬───────────────────────────────┘ - │ /tools/invoke -┌─────────────────────────┴───────────────────────────────┐ -│ OpenClaw Gateway │ -│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ -│ │ Main │ │ Architect│ │ Security │ ... │ -│ │ Agent │ │ │ │ Auditor │ │ -│ └──────────┘ └──────────┘ └──────────┘ │ -└─────────────────────────────────────────────────────────┘ +┌─────────────────────────────────────────────────────────────┐ +│ Task Board UI │ +│ (static/index.html — SPA) │ +│ │ │ +│ WebSocket │ REST API │ +└─────────────────────────┼───────────────────────────────────┘ + │ +┌─────────────────────────┼───────────────────────────────────┐ +│ FastAPI Backend │ +│ │ +│ app/main.py ─── Middleware (IP restriction, request logging)│ +│ │ │ +│ ├── routes/tasks.py CRUD, move, start/stop │ +│ ├── routes/sessions.py OpenClaw session mgmt │ +│ ├── routes/comments.py Comments + @mention spawn │ +│ ├── routes/projects.py Project CRUD │ +│ ├── routes/chat.py Command bar ↔ main agent │ +│ ├── routes/action_items.py Questions, blockers │ +│ └── routes/uploads.py File attachments │ +│ │ │ +│ ├── config.py Env vars, agent metadata │ +│ ├── database.py SQLite + WAL + write lock │ +│ ├── models.py Pydantic models + validators │ +│ ├── openclaw.py WS-RPC, spawn, guardrails │ +│ └── websocket.py Broadcast manager │ +└─────────────────────────┬───────────────────────────────────┘ + │ /tools/invoke + WS-RPC +┌─────────────────────────┴───────────────────────────────────┐ +│ OpenClaw Gateway │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ +│ │ Main │ │ Architect│ │ Security │ ... 
│ +│ │ Agent │ │ │ │ Auditor │ │ +│ └──────────┘ └──────────┘ └──────────┘ │ +└──────────────────────────────────────────────────────────────┘ ``` --- @@ -384,25 +458,45 @@ Action items track **what needs attention** with notification bubbles on cards: ### Tasks - `GET /api/tasks` — List all tasks -- `POST /api/tasks` — Create task +- `POST /api/tasks` — Create task (with `project_id`) - `PATCH /api/tasks/{id}` — Update task - `DELETE /api/tasks/{id}` — Delete task - `POST /api/tasks/{id}/move` — Move task to status +- `POST /api/tasks/{id}/start-work` — Set working agent +- `POST /api/tasks/{id}/stop-work` — Clear working agent +- `GET /api/tasks/{id}/agent-status` — Check session liveness + +### Projects +- `GET /api/projects` — List all projects +- `POST /api/projects` — Create project +- `DELETE /api/projects/{id}` — Delete project (reassigns tasks to Default) ### Comments - `GET /api/tasks/{id}/comments` — Get comments -- `POST /api/tasks/{id}/comments` — Add comment +- `POST /api/tasks/{id}/comments` — Add comment (triggers @mention spawn) +- `DELETE /api/tasks/{id}/comments/{comment_id}` — Delete comment ### Action Items -- `GET /api/tasks/{id}/action-items` — Get action items +- `GET /api/tasks/{id}/action-items` — Get action items (`?archived=true`) - `POST /api/tasks/{id}/action-items` — Create action item - `POST /api/action-items/{id}/resolve` — Resolve item +- `POST /api/action-items/{id}/archive` — Archive resolved item +- `POST /api/action-items/{id}/unarchive` — Unarchive item + +### Sessions +- `GET /api/sessions` — List active OpenClaw sessions +- `POST /api/sessions/create` — Spawn new session +- `POST /api/sessions/{key}/stop` — Stop session +- `DELETE /api/sessions/{key}` — Delete session (via WS-RPC) +- `POST /api/sessions/stop-all` — Emergency stop all non-main sessions ### Command Bar +- `GET /api/jarvis/history` — Chat history (`?session_key=...`) - `POST /api/jarvis/chat` — Send message to main agent - `POST /api/jarvis/respond` — Push response to command bar -### WebSocket +### Config & WebSocket +- `GET /api/config` — Board config (agents, projects, statuses, branding) - `WS /ws` — Real-time updates --- @@ -411,28 +505,19 @@ Action items track **what needs attention** with notification bubbles on cards: ### Adding New Agents -Edit `app.py`: +Agents are auto-detected from OpenClaw at startup. To configure manually, set in `.env`: -```python -AGENT_TO_OPENCLAW_ID = { - "Your Agent": "your-agent-id", - ... -} +```env +# Format: agent_id:Display Name (comma-separated) +AGENTS=main:Jarvis,architect:Architect,my-agent:My Custom Agent -AGENT_SYSTEM_PROMPTS = { - "your-agent-id": "Your agent's system prompt...", - ... -} +# Optional: disable auto-detection +AGENT_AUTO_DETECT=false ``` -Update `static/index.html` for agent icon: +Agent icons and colors are assigned automatically. Built-in agents (`main`, `architect`, `security-auditor`, `code-reviewer`, `ux-manager`) have predefined icons and colors. Custom agents get auto-assigned colors. -```javascript -const AGENT_ICONS = { - 'Your Agent': '🚀', - ... -}; -``` +See [OPENCLAW_SETUP.md](OPENCLAW_SETUP.md) for full agent configuration details. 
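A quick way to confirm which agents the board actually resolved after changing these variables is to query the config endpoint listed in the API reference above. A minimal sketch using `httpx` (already a dependency of the pre-refactor backend); the exact v2.0.0 response shape — assumed here to still include `agents` and `statuses` fields as in earlier versions — may differ:

```python
import httpx

# Adjust if you changed TASKBOARD_BASE_URL
BASE_URL = "http://localhost:8080"

# Fetch the board configuration and print what the backend resolved
config = httpx.get(f"{BASE_URL}/api/config", timeout=5.0).json()
print("Detected agents:", config.get("agents"))
print("Statuses:", config.get("statuses"))
```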
--- diff --git a/app.py b/app.py deleted file mode 100644 index 17612d3..0000000 --- a/app.py +++ /dev/null @@ -1,2224 +0,0 @@ -""" -RIZQ Task Board - FastAPI Backend -Simple, fast, full agent control, LIVE updates -""" - -import sqlite3 -import json -import asyncio -import re -import httpx -from datetime import datetime -from pathlib import Path -from typing import Optional, List, Set -from contextlib import contextmanager - -from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Depends, Header, Request -from fastapi.staticfiles import StaticFiles -from fastapi.responses import FileResponse, PlainTextResponse -from fastapi.middleware.cors import CORSMiddleware -from starlette.middleware.base import BaseHTTPMiddleware -from pydantic import BaseModel, field_validator - -# ============================================================================= -# CONFIG -# ============================================================================= -import os -import secrets -import hashlib - -DATA_DIR = Path(__file__).parent / "data" -DATA_DIR.mkdir(exist_ok=True) -DB_PATH = DATA_DIR / "tasks.db" -STATIC_PATH = Path(__file__).parent / "static" - -# ============================================================================= -# BRANDING (configurable via environment variables) -# ============================================================================= -MAIN_AGENT_NAME = os.getenv("MAIN_AGENT_NAME", "Jarvis") -MAIN_AGENT_EMOJI = os.getenv("MAIN_AGENT_EMOJI", "\U0001F6E1") -HUMAN_NAME = os.getenv("HUMAN_NAME", "User") -HUMAN_SUPERVISOR_LABEL = os.getenv("HUMAN_SUPERVISOR_LABEL", "User") -BOARD_TITLE = os.getenv("BOARD_TITLE", "Task Board") - -AGENTS = [MAIN_AGENT_NAME, "Architect", "Security Auditor", "Code Reviewer", "UX Manager", "User", "Unassigned"] -STATUSES = ["Backlog", "In Progress", "Review", "Done", "Blocked"] -PRIORITIES = ["Critical", "High", "Medium", "Low"] - -# Map task board agent names to OpenClaw agent IDs -# Customize these to match your OpenClaw agent configuration -AGENT_TO_OPENCLAW_ID = { - MAIN_AGENT_NAME: "main", # Main agent (handles command bar chat) - "Architect": "architect", - "Security Auditor": "security-auditor", - "Code Reviewer": "code-reviewer", - "UX Manager": "ux-manager", -} - -# Alias for backward compatibility -AGENT_TO_OPENCLAW_ID = AGENT_TO_OPENCLAW_ID - -# Build mention regex dynamically from agent names (including main agent now) -MENTIONABLE_AGENTS = list(AGENT_TO_OPENCLAW_ID.keys()) -MENTION_PATTERN = re.compile(r'@(' + '|'.join(re.escape(a) for a in MENTIONABLE_AGENTS) + r')', re.IGNORECASE) - -# Security: Load secrets from environment variables -OPENCLAW_GATEWAY_URL = os.getenv("OPENCLAW_GATEWAY_URL", "http://host.docker.internal:18789") -OPENCLAW_TOKEN = os.getenv("OPENCLAW_TOKEN", "") -TASKBOARD_API_KEY = os.getenv("TASKBOARD_API_KEY", "") -OPENCLAW_ENABLED = bool(OPENCLAW_TOKEN) - -# Project configuration (customize in .env) -PROJECT_NAME = os.getenv("PROJECT_NAME", "My Project") -COMPANY_NAME = os.getenv("COMPANY_NAME", "Acme Corp") -COMPANY_CONTEXT = os.getenv("COMPANY_CONTEXT", "software development") -ALLOWED_PATHS = os.getenv("ALLOWED_PATHS", "/workspace, /project") -COMPLIANCE_FRAMEWORKS = os.getenv("COMPLIANCE_FRAMEWORKS", "your security requirements") - -# IP-based access restriction -# Always allowed: localhost variants and Docker internal networks -ALWAYS_ALLOWED_IPS = {"127.0.0.1", "localhost", "::1"} -# Additional allowed IPs from env (comma-separated) -_env_ips = os.getenv("ALLOWED_IPS", "").strip() -ALLOWED_IPS 
= set(ip.strip() for ip in _env_ips.split(",") if ip.strip()) if _env_ips else set() -print(f"🔒 IP Restriction: localhost + 172.20.200.59 + 172.20.200.119 + 172.18.0.1 (internal) + {ALLOWED_IPS if ALLOWED_IPS else 'no external IPs'}") - -# Warn if running without security -if not TASKBOARD_API_KEY: - print("⚠️ WARNING: TASKBOARD_API_KEY not set. API authentication disabled!") -if not OPENCLAW_TOKEN: - print("⚠️ WARNING: OPENCLAW_TOKEN not set. OPENCLAW integration disabled!") - -# File upload limits -MAX_ATTACHMENT_SIZE_MB = 10 -MAX_ATTACHMENT_SIZE_BYTES = MAX_ATTACHMENT_SIZE_MB * 1024 * 1024 - -# ============================================================================= -# SECURITY -# ============================================================================= - -def verify_api_key(authorization: str = Header(None), x_api_key: str = Header(None)): - """Verify API key from Authorization header or X-API-Key header.""" - if not TASKBOARD_API_KEY: - return True # Auth disabled if no key configured - - # Check Authorization: Bearer - if authorization: - if authorization.startswith("Bearer "): - token = authorization[7:] - if secrets.compare_digest(token, TASKBOARD_API_KEY): - return True - - # Check X-API-Key header - if x_api_key: - if secrets.compare_digest(x_api_key, TASKBOARD_API_KEY): - return True - - raise HTTPException(status_code=401, detail="Invalid or missing API key") - -def verify_internal_only(request: Request): - """Only allow requests from localhost/internal sources.""" - client_host = request.client.host if request.client else None - allowed_hosts = ["127.0.0.1", "localhost", "::1", "172.17.0.1", "host.docker.internal"] - - # Also allow Docker internal IPs (172.x.x.x) - if client_host and (client_host in allowed_hosts or client_host.startswith("172.")): - return True - - # If API key is provided, allow from anywhere - if TASKBOARD_API_KEY: - return True - - raise HTTPException(status_code=403, detail="Access denied") - -async def notify_OPENCLAW(task_id: int, task_title: str, comment_agent: str, comment_content: str): - """Send webhook to OpenClaw when a comment needs attention.""" - if not OPENCLAW_ENABLED or comment_agent == MAIN_AGENT_NAME: - return # Don't notify for main agent's own comments - - try: - async with httpx.AsyncClient(timeout=5.0) as client: - # Use OPENCLAW's cron wake endpoint - payload = { - "action": "wake", - "text": f"💬 Task Board: New comment on #{task_id} ({task_title}) from {comment_agent}:\n\n{comment_content[:200]}{'...' 
if len(comment_content) > 200 else ''}\n\nCheck and respond: http://localhost:8080" - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - await client.post(f"{OPENCLAW_GATEWAY_URL}/api/cron/wake", json=payload, headers=headers) - print(f"Notified OPENCLAW about comment from {comment_agent}") - except Exception as e: - print(f"Webhook to OPENCLAW failed: {e}") - - -async def send_to_agent_session(session_key: str, message: str) -> bool: - """Send a follow-up message to an active agent session.""" - if not OPENCLAW_ENABLED or not session_key: - return False - - try: - async with httpx.AsyncClient(timeout=30.0) as client: - payload = { - "tool": "sessions_send", - "args": { - "sessionKey": session_key, - "message": message - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - print(f"✅ Sent message to session {session_key}") - return True - else: - print(f"❌ Failed to send to session: {response.text}") - return False - except Exception as e: - print(f"❌ Failed to send to agent session: {e}") - return False - - -def get_task_session(task_id: int) -> Optional[str]: - """Get the active agent session key for a task.""" - with get_db() as conn: - row = conn.execute("SELECT agent_session_key FROM tasks WHERE id = ?", (task_id,)).fetchone() - return row["agent_session_key"] if row and row["agent_session_key"] else None - - -async def spawn_followup_session(task_id: int, task_title: str, agent_name: str, previous_context: str, new_message: str): - """Spawn a follow-up session for an agent with conversation context.""" - if not OPENCLAW_ENABLED: - return None - - agent_id = AGENT_TO_OPENCLAW_ID.get(agent_name) - if not agent_id: - return None - # Main agent can spawn follow-up sessions too - - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - - followup_prompt = f"""# Follow-up on Task #{task_id}: {task_title} - -You previously worked on this task and moved it to Review. User has a follow-up question. - -## Previous Conversation: -{previous_context if previous_context else "(No previous messages)"} - -## User's New Message: -{new_message} - -## Your Role: -{system_prompt} - -## Instructions: -1. Call start-work API: POST http://localhost:8080/api/tasks/{task_id}/start-work?agent={agent_name} -2. Read the context and User's question -3. Respond helpfully by posting a comment: POST http://localhost:8080/api/tasks/{task_id}/comments -4. Keep your response focused on what User asked -5. Call stop-work API: POST http://localhost:8080/api/tasks/{task_id}/stop-work?agent={agent_name} - - Add &outcome=review&reason= if work is complete - - Add &outcome=blocked&reason= if you need more input - -Respond now. 
-""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": followup_prompt, - "label": f"task-{task_id}-followup", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - spawn_info = result.get("result", {}) - session_key = spawn_info.get("childSessionKey", None) - if session_key: - set_task_session(task_id, session_key) - print(f"✅ Spawned follow-up session for {agent_name} on task #{task_id}") - return result - else: - print(f"❌ Failed to spawn follow-up: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn follow-up session: {e}") - return None - - -def set_task_session(task_id: int, session_key: Optional[str]): - """Set or clear the agent session key for a task.""" - with get_db() as conn: - conn.execute( - "UPDATE tasks SET agent_session_key = ?, updated_at = ? WHERE id = ?", - (session_key, datetime.now().isoformat(), task_id) - ) - conn.commit() - - -async def spawn_mentioned_agent(task_id: int, task_title: str, task_description: str, - mentioned_agent: str, mentioner: str, comment_content: str, - previous_context: str = ""): - """Spawn a session for an @mentioned agent to contribute to a task they don't own. - - For the main agent (Jarvis), sends to main session instead of spawning. - """ - if not OPENCLAW_ENABLED: - return None - - agent_id = AGENT_TO_OPENCLAW_ID.get(mentioned_agent) - if not agent_id: - return None - - # All agents (including main) now spawn subagent sessions - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - - mention_prompt = f"""# You've Been Tagged: Task #{task_id} - -**{mentioner}** mentioned you on a task and needs your input. - -## Task: {task_title} -{task_description or '(No description)'} - -## What {mentioner} Said: -{comment_content} - -## Previous Conversation: -{previous_context if previous_context else "(No prior comments)"} - -## Your Role: -{system_prompt} - -## Instructions: -1. Call start-work API: POST http://localhost:8080/api/tasks/{task_id}/start-work?agent={mentioned_agent} -2. Review the task from YOUR perspective ({mentioned_agent}) -3. Post your findings/response as a comment: POST http://localhost:8080/api/tasks/{task_id}/comments -4. Call stop-work API: POST http://localhost:8080/api/tasks/{task_id}/stop-work?agent={mentioned_agent} - -**Note:** You are NOT the assigned owner of this task. You're providing your expertise because you were tagged. -Do NOT move the task (no outcome param) — that's the owner's job. - -{AGENT_GUARDRAILS} - -Respond now with your assessment. 
-""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": mention_prompt, - "label": f"task-{task_id}-mention-{agent_id}", - "cleanup": "delete" # Cleanup after since they're just dropping in - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - spawn_info = result.get("result", {}) - session_key = spawn_info.get("childSessionKey", "unknown") - - # Post system comment about the spawn - async with httpx.AsyncClient(timeout=5.0) as comment_client: - await comment_client.post( - f"http://localhost:8080/api/tasks/{task_id}/comments", - json={ - "agent": "System", - "content": f"📢 **{mentioned_agent}** was tagged by {mentioner} and is now reviewing this task." - } - ) - - print(f"✅ Spawned {mentioned_agent} for mention on task #{task_id}") - return result - else: - print(f"❌ Failed to spawn {mentioned_agent} for mention: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn mentioned agent: {e}") - return None - -# Guardrails to inject into every sub-agent task -AGENT_GUARDRAILS = f""" -⚠️ MANDATORY CONSTRAINTS (Approved by User via Task Board assignment): - -FILESYSTEM BOUNDARIES: -- ONLY access: {ALLOWED_PATHS} -- Everything else is FORBIDDEN without explicit authorization - -FORBIDDEN ACTIONS (do not attempt without approval): -- Browser tool (except UX Manager on localhost only) -- git commit (requires safeword from User) -- Any action outside the authorized paths - -WEB_FETCH (requires approval): -- You have web_fetch available but MUST ask User first -- Create an action item (type: question) explaining what URL you need and why -- Wait for User to resolve the action item before fetching -- Only fetch after explicit approval - -COMPLIANCE CONTEXT: -- {COMPANY_NAME}, {COMPANY_CONTEXT} -- {COMPLIANCE_FRAMEWORKS} -- Security over convenience — always - -COMMUNICATION & ESCALATION: -- Post comments on the task card to communicate -- Create action items for questions that need answers (type: question) -- Create action items for blockers (type: blocker) - -ESCALATION CHAIN: -1. {MAIN_AGENT_NAME} (coordinator) monitors your action items and may answer if confident -2. If {MAIN_AGENT_NAME} answers, the item gets resolved and you can proceed -3. If {MAIN_AGENT_NAME} is unsure, they leave it for {HUMAN_SUPERVISOR_LABEL} to review -4. {HUMAN_SUPERVISOR_LABEL} has final authority on all decisions - -TASK BOARD INTEGRATION: -- Use start-work API when beginning: POST http://localhost:8080/api/tasks/{{task_id}}/start-work?agent={{your_name}} -- Post updates as comments: POST http://localhost:8080/api/tasks/{{task_id}}/comments (json: {{"agent": "your_name", "content": "message"}}) -- Create action items for questions: POST http://localhost:8080/api/tasks/{{task_id}}/action-items (json: {{"agent": "your_name", "content": "question", "item_type": "question"}}) -- Move to Review when done: POST http://localhost:8080/api/tasks/{{task_id}}/move?status=Review&agent={{your_name}}&reason=... 
-- Use stop-work API when finished: POST http://localhost:8080/api/tasks/{{task_id}}/stop-work - -REPORT FORMAT: -When complete, post a comment with your findings using this format: -## [Your Role] Report -**Task:** [task title] -**Verdict:** ✅ APPROVED / ⚠️ CONCERNS / 🛑 BLOCKED -### Findings -- [SEVERITY] Issue description -### Summary -[1-2 sentence assessment] -""" - -AGENT_SYSTEM_PROMPTS = { - "main": f"""You are {MAIN_AGENT_NAME}, the primary coordinator for {COMPANY_NAME}. - -Your focus: -- General task implementation and coordination -- Code writing and debugging -- Cross-cutting concerns that don't fit specialist roles -- Synthesizing input from other agents -- Direct implementation work - -Project: {PROJECT_NAME} -You're the hands-on executor. When assigned a task, dig in and get it done.""", - - "architect": f"""You are the Architect for {COMPANY_NAME}. - -Your focus: -- System design and architectural patterns -- Scalability and performance implications -- Technical trade-offs and recommendations -- Integration architecture -- Database design and data modeling - -Project: {PROJECT_NAME} -Be concise. Flag concerns with severity (CRITICAL/HIGH/MEDIUM/LOW).""", - - "security-auditor": f"""You are the Security Auditor for {COMPANY_NAME}. - -Your focus: -- SOC2 Trust Services Criteria (Security, Availability, Confidentiality, Privacy) -- HIPAA compliance (PHI handling, access controls, audit logging) -- CIS Controls benchmarks -- OWASP Top 10 vulnerabilities -- Secure credential storage and handling -- Tenant data isolation (multi-tenant SaaS) - -NON-NEGOTIABLE: Security over convenience. Always. -Rate findings: CRITICAL (blocks deploy) / HIGH / MEDIUM / LOW""", - - "code-reviewer": f"""You are the Code Reviewer for {COMPANY_NAME}. - -Your focus: -- Code quality and best practices -- DRY, SOLID principles -- Error handling and edge cases -- Performance considerations -- Code readability and maintainability -- Test coverage gaps - -Project: {PROJECT_NAME} -Format: MUST FIX / SHOULD FIX / CONSIDER / NICE TO HAVE""", - - "ux-manager": f"""You are the UX Manager for {COMPANY_NAME}. - -Your focus: -- User flow clarity and efficiency -- Error message helpfulness -- Form design and validation feedback -- UI consistency across the platform -- Accessibility basics -- Onboarding experience - -Project: {PROJECT_NAME} - -BROWSER ACCESS (localhost only): -You have browser access to review the app UI. Use it to: -- Take snapshots of pages to analyze layout, spacing, colors -- Check user flows and navigation -- Verify form designs and error states -- Assess overall visual consistency - -ALLOWED URLs (localhost only): -- http://localhost:* (any port) -- http://127.0.0.1:* - -DO NOT navigate to any external URLs. 
Your browser access is strictly for reviewing the local app.""" -} - -async def spawn_agent_session(task_id: int, task_title: str, task_description: str, agent_name: str): - """Spawn a OPENCLAW sub-agent session for a task via tools/invoke API.""" - if not OPENCLAW_ENABLED: - return None - - agent_id = AGENT_TO_OPENCLAW_ID.get(agent_name) - if not agent_id: - return None # Don't spawn for unknown agents - # Note: Main agent (Jarvis) CAN spawn subagents now - no special case - - # Build the task prompt with guardrails - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - task_prompt = f"""# Task Assignment from RIZQ Task Board (Approved by {HUMAN_SUPERVISOR_LABEL}) - -**Task #{task_id}:** {task_title} - -**Description:** -{task_description or 'No description provided.'} - -{AGENT_GUARDRAILS} - -## Your Role -{system_prompt} - ---- - -## Instructions -1. Call start-work API: POST http://localhost:8080/api/tasks/{task_id}/start-work?agent={agent_name} - - This auto-moves the card to "In Progress" if needed -2. Analyze the task thoroughly -3. Post your findings as a comment on the task -4. When done, call stop-work with outcome: POST http://localhost:8080/api/tasks/{task_id}/stop-work?agent={agent_name}&outcome=review&reason= - - Use outcome=review when work is complete (auto-moves to Review) - - Use outcome=blocked&reason= if you need input (auto-moves to Blocked) - -## IMPORTANT: Stay Available -After posting your findings, **remain available for follow-up questions**. User may reply with questions or requests for clarification. When you receive a message starting with "💬 **User replied**", respond thoughtfully and post your response as a comment on the task. - -Your session will automatically end when User marks the task as Done. - -Begin now. -""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - # Use OPENCLAW's tools/invoke API to spawn sub-agent directly - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": task_prompt, - "label": f"task-{task_id}", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - print(f"✅ Spawned {agent_name} ({agent_id}) for task #{task_id}") - # Add a comment to the task noting the agent was spawned - spawn_info = result.get("result", {}) - run_id = spawn_info.get("runId", "unknown") - session_key = spawn_info.get("childSessionKey", None) - - # Save session key to database for follow-up messages - if session_key: - set_task_session(task_id, session_key) - - async with httpx.AsyncClient(timeout=5.0) as comment_client: - await comment_client.post( - f"http://localhost:8080/api/tasks/{task_id}/comments", - json={ - "agent": "System", - "content": f"🤖 **{agent_name}** agent spawned automatically.\n\nSession: `{session_key or 'unknown'}`\nRun ID: `{run_id}`\n\n💬 *Reply to this task and the agent will respond.*" - } - ) - return result - else: - print(f"❌ Failed to spawn {agent_name}: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn agent session: {e}") - return None - -# ============================================================================= -# WEBSOCKET MANAGER -# ============================================================================= - -class ConnectionManager: - """Manage 
WebSocket connections for live updates.""" - - def __init__(self): - self.active_connections: Set[WebSocket] = set() - - async def connect(self, websocket: WebSocket): - await websocket.accept() - self.active_connections.add(websocket) - - def disconnect(self, websocket: WebSocket): - self.active_connections.discard(websocket) - - async def broadcast(self, message: dict): - """Send update to all connected clients.""" - dead = set() - for connection in self.active_connections: - try: - await connection.send_json(message) - except: - dead.add(connection) - self.active_connections -= dead - -manager = ConnectionManager() - -# ============================================================================= -# DATABASE -# ============================================================================= - -def init_db(): - """Initialize the database.""" - with get_db() as conn: - conn.execute(""" - CREATE TABLE IF NOT EXISTS tasks ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - title TEXT NOT NULL, - description TEXT DEFAULT '', - status TEXT DEFAULT 'Backlog', - priority TEXT DEFAULT 'Medium', - agent TEXT DEFAULT 'Unassigned', - due_date TEXT, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - board TEXT DEFAULT 'tasks' - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS activity_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER, - action TEXT NOT NULL, - agent TEXT, - details TEXT, - timestamp TEXT NOT NULL - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS comments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER NOT NULL, - agent TEXT NOT NULL, - content TEXT NOT NULL, - created_at TEXT NOT NULL - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS action_items ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER NOT NULL, - comment_id INTEGER, - agent TEXT NOT NULL, - content TEXT NOT NULL, - item_type TEXT DEFAULT 'question', - resolved INTEGER DEFAULT 0, - created_at TEXT NOT NULL, - resolved_at TEXT - ) - """) - # Add working_agent column if it doesn't exist - try: - conn.execute("ALTER TABLE tasks ADD COLUMN working_agent TEXT DEFAULT NULL") - except: - pass # Column already exists - # Add agent_session_key column for persistent agent sessions - try: - conn.execute("ALTER TABLE tasks ADD COLUMN agent_session_key TEXT DEFAULT NULL") - except: - pass # Column already exists - - # Add archived column to action_items - try: - conn.execute("ALTER TABLE action_items ADD COLUMN archived INTEGER DEFAULT 0") - except: - pass # Column already exists - - # Chat messages table for persistent command bar history - conn.execute(""" - CREATE TABLE IF NOT EXISTS chat_messages ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - session_key TEXT DEFAULT 'main', - role TEXT NOT NULL, - content TEXT NOT NULL, - attachments TEXT, - created_at TEXT NOT NULL - ) - """) - # Add session_key column if upgrading from older schema - try: - conn.execute("ALTER TABLE chat_messages ADD COLUMN session_key TEXT DEFAULT 'main'") - except: - pass # Column already exists - - # Deleted sessions table - to filter out from dropdown - conn.execute(""" - CREATE TABLE IF NOT EXISTS deleted_sessions ( - session_key TEXT PRIMARY KEY, - deleted_at TEXT NOT NULL - ) - """) - conn.commit() - -@contextmanager -def get_db(): - """Database connection context manager.""" - conn = sqlite3.connect(DB_PATH) - conn.row_factory = sqlite3.Row - try: - yield conn - finally: - conn.close() - -def log_activity(task_id: int, action: str, agent: str = None, details: str = None): - """Log an activity.""" - 
with get_db() as conn: - conn.execute( - "INSERT INTO activity_log (task_id, action, agent, details, timestamp) VALUES (?, ?, ?, ?, ?)", - (task_id, action, agent, details, datetime.now().isoformat()) - ) - conn.commit() - -# ============================================================================= -# MODELS -# ============================================================================= - -class TaskCreate(BaseModel): - title: str - description: str = "" - status: str = "Backlog" - priority: str = "Medium" - agent: str = "Unassigned" - due_date: Optional[str] = None - board: str = "tasks" - source_file: Optional[str] = None - source_ref: Optional[str] = None - -class TaskUpdate(BaseModel): - title: Optional[str] = None - description: Optional[str] = None - status: Optional[str] = None - priority: Optional[str] = None - agent: Optional[str] = None - due_date: Optional[str] = None - source_file: Optional[str] = None - source_ref: Optional[str] = None - -class Task(BaseModel): - id: int - title: str - description: str - status: str - priority: str - agent: str - due_date: Optional[str] - created_at: str - updated_at: str - board: str - source_file: Optional[str] = None - source_ref: Optional[str] = None - working_agent: Optional[str] = None - -# ============================================================================= -# APP -# ============================================================================= - -app = FastAPI(title="RIZQ Task Board", version="1.2.0") - -# Restrict CORS to localhost origins only -ALLOWED_ORIGINS = [ - "http://localhost:8080", - "http://127.0.0.1:8080", - "http://localhost:3000", - "http://127.0.0.1:3000", -] - -app.add_middleware( - CORSMiddleware, - allow_origins=ALLOWED_ORIGINS, - allow_credentials=True, - allow_methods=["GET", "POST", "PATCH", "DELETE"], - allow_headers=["Authorization", "X-API-Key", "Content-Type"], -) - -# IP Restriction Middleware -# Specific Docker IPs allowed (NOT blanket 172.x.x.x or 10.x.x.x) -ALLOWED_DOCKER_IPS = { - "172.20.200.59", # OpenClaw gateway IP (user's access) - "172.20.200.119", # Additional allowed IP (user's access) - "172.18.0.1", # Internal Docker bridge network (container-to-container) -} - -class IPRestrictionMiddleware(BaseHTTPMiddleware): - """Block requests from IPs not in the allowed list.""" - - async def dispatch(self, request: Request, call_next): - client_ip = request.client.host if request.client else None - - # Always allow localhost - if client_ip in ALWAYS_ALLOWED_IPS: - return await call_next(request) - - # Allow specific Docker IPs (NOT blanket ranges) - if client_ip in ALLOWED_DOCKER_IPS: - return await call_next(request) - - # Check against allowed IPs from env - if client_ip in ALLOWED_IPS: - return await call_next(request) - - # Block with clear message - print(f"🚫 Blocked request from {client_ip} - not in allowed IPs") - return PlainTextResponse( - f"Access denied. 
IP {client_ip} not authorized.", - status_code=403 - ) - -app.add_middleware(IPRestrictionMiddleware) - -# Initialize DB on startup -@app.on_event("startup") -def startup(): - init_db() - -# Serve static files -STATIC_PATH.mkdir(exist_ok=True) -app.mount("/static", StaticFiles(directory=STATIC_PATH), name="static") - -# Serve data attachments (images uploaded via chat) -ATTACHMENTS_PATH = DATA_DIR / "attachments" -ATTACHMENTS_PATH.mkdir(exist_ok=True) -app.mount("/data/attachments", StaticFiles(directory=ATTACHMENTS_PATH), name="attachments") - -@app.get("/") -def read_root(): - """Serve the Kanban UI.""" - return FileResponse(STATIC_PATH / "index.html") - -# ============================================================================= -# WEBSOCKET ENDPOINT -# ============================================================================= - -@app.websocket("/ws") -async def websocket_endpoint(websocket: WebSocket): - """WebSocket for live updates.""" - await manager.connect(websocket) - try: - while True: - # Keep connection alive, wait for messages (ping/pong) - data = await websocket.receive_text() - # Echo back for ping - if data == "ping": - await websocket.send_text("pong") - except WebSocketDisconnect: - manager.disconnect(websocket) - -# ============================================================================= -# CONFIG ENDPOINTS -# ============================================================================= - -@app.get("/api/config") -def get_config(): - """Get board configuration including branding.""" - return { - "agents": AGENTS, - "statuses": STATUSES, - "priorities": PRIORITIES, - "branding": { - "mainAgentName": MAIN_AGENT_NAME, - "mainAgentEmoji": MAIN_AGENT_EMOJI, - "humanName": HUMAN_NAME, - "humanSupervisorLabel": HUMAN_SUPERVISOR_LABEL, - "boardTitle": BOARD_TITLE, - } - } - -# ============================================================================= -# TASK ENDPOINTS -# ============================================================================= - -@app.get("/api/tasks", response_model=List[Task]) -def list_tasks(board: str = "tasks", agent: str = None, status: str = None): - """List all tasks with optional filters.""" - with get_db() as conn: - query = "SELECT * FROM tasks WHERE board = ?" - params = [board] - - if agent: - query += " AND agent = ?" - params.append(agent) - if status: - query += " AND status = ?" 
- params.append(status) - - query += " ORDER BY CASE priority WHEN 'Critical' THEN 1 WHEN 'High' THEN 2 WHEN 'Medium' THEN 3 ELSE 4 END, created_at DESC" - - rows = conn.execute(query, params).fetchall() - return [dict(row) for row in rows] - -@app.get("/api/tasks/{task_id}", response_model=Task) -def get_task(task_id: int): - """Get a single task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - return dict(row) - -@app.post("/api/tasks", response_model=Task) -async def create_task(task: TaskCreate): - """Create a new task.""" - now = datetime.now().isoformat() - with get_db() as conn: - cursor = conn.execute( - """INSERT INTO tasks (title, description, status, priority, agent, due_date, created_at, updated_at, board, source_file, source_ref) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", - (task.title, task.description, task.status, task.priority, task.agent, task.due_date, now, now, task.board, task.source_file, task.source_ref) - ) - conn.commit() - task_id = cursor.lastrowid - log_activity(task_id, "created", task.agent, f"Created: {task.title}") - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast to all clients - await manager.broadcast({"type": "task_created", "task": result}) - return result - -@app.patch("/api/tasks/{task_id}", response_model=Task) -async def update_task(task_id: int, updates: TaskUpdate): - """Update a task.""" - with get_db() as conn: - # Get current task - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - current = dict(row) - changes = [] - - # Build update - update_fields = [] - params = [] - - for field in ["title", "description", "status", "priority", "agent", "due_date", "source_file", "source_ref"]: - new_value = getattr(updates, field) - if new_value is not None and new_value != current[field]: - update_fields.append(f"{field} = ?") - params.append(new_value) - changes.append(f"{field}: {current[field]} → {new_value}") - - if update_fields: - update_fields.append("updated_at = ?") - params.append(datetime.now().isoformat()) - params.append(task_id) - - conn.execute(f"UPDATE tasks SET {', '.join(update_fields)} WHERE id = ?", params) - conn.commit() - - log_activity(task_id, "updated", updates.agent or current["agent"], "; ".join(changes)) - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast to all clients - await manager.broadcast({"type": "task_updated", "task": result}) - return result - -@app.delete("/api/tasks/{task_id}") -async def delete_task(task_id: int): - """Delete a task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - conn.execute("DELETE FROM tasks WHERE id = ?", (task_id,)) - conn.commit() - log_activity(task_id, "deleted", None, f"Deleted: {row['title']}") - - # Broadcast to all clients - await manager.broadcast({"type": "task_deleted", "task_id": task_id}) - return {"status": "deleted", "id": task_id} - -# ============================================================================= -# AGENT ENDPOINTS -# ============================================================================= - -@app.get("/api/agents/{agent}/tasks") 
-def get_agent_tasks(agent: str): - """Get all tasks assigned to an agent.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM tasks WHERE agent = ? AND status NOT IN ('Done', 'Blocked') ORDER BY priority, created_at", - (agent,) - ).fetchall() - return [dict(row) for row in rows] - -# ============================================================================= -# WORK STATUS (AI Activity Indicator) -# ============================================================================= - -@app.post("/api/tasks/{task_id}/start-work") -async def start_work(task_id: int, agent: str): - """Mark that an agent is actively working on a task. Auto-moves to In Progress.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - current_status = row["status"] - now = datetime.now().isoformat() - - # Auto-move to In Progress if in Backlog or Blocked - moved = False - if current_status in ["Backlog", "Blocked"]: - conn.execute( - "UPDATE tasks SET working_agent = ?, status = ?, updated_at = ? WHERE id = ?", - (agent, "In Progress", now, task_id) - ) - moved = True - log_activity(task_id, "status_change", agent, f"Auto-moved from {current_status} to In Progress (agent started work)") - else: - conn.execute( - "UPDATE tasks SET working_agent = ?, updated_at = ? WHERE id = ?", - (agent, now, task_id) - ) - conn.commit() - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - await manager.broadcast({"type": "work_started", "task_id": task_id, "agent": agent}) - if moved: - await manager.broadcast({"type": "task_updated", "task": result}) - return {"status": "working", "task_id": task_id, "agent": agent, "moved_to": "In Progress" if moved else None} - -@app.post("/api/tasks/{task_id}/stop-work") -async def stop_work(task_id: int, agent: str = None, outcome: str = None, reason: str = None): - """Mark that an agent has stopped working on a task. 
- - Args: - outcome: Optional - "review" or "blocked" to auto-move the card - reason: Optional - reason for the move (used for action items) - """ - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - now = datetime.now().isoformat() - current_status = row["status"] - new_status = None - action_item = None - - # Determine target status based on outcome - if outcome == "review" and current_status == "In Progress": - new_status = "Review" - # Create completion action item - reason_text = reason or "Work completed, ready for review" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or "Agent", reason_text, "completion", now) - ) - action_item = {"id": cursor.lastrowid, "task_id": task_id, "agent": agent or "Agent", - "content": reason_text, "item_type": "completion", "resolved": False, "created_at": now} - elif outcome == "blocked" and current_status == "In Progress": - new_status = "Blocked" - # Create blocker action item - reason_text = reason or "Blocked - awaiting input" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or "Agent", reason_text, "blocker", now) - ) - action_item = {"id": cursor.lastrowid, "task_id": task_id, "agent": agent or "Agent", - "content": reason_text, "item_type": "blocker", "resolved": False, "created_at": now} - - # Update task - if new_status: - conn.execute( - "UPDATE tasks SET working_agent = NULL, status = ?, updated_at = ? WHERE id = ?", - (new_status, now, task_id) - ) - log_activity(task_id, "status_change", agent or "Agent", f"Auto-moved to {new_status} (agent stopped work)") - else: - conn.execute( - "UPDATE tasks SET working_agent = NULL, updated_at = ? WHERE id = ?", - (now, task_id) - ) - conn.commit() - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - await manager.broadcast({"type": "work_stopped", "task_id": task_id}) - if new_status: - await manager.broadcast({"type": "task_updated", "task": result}) - if action_item: - await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": action_item}) - - return {"status": "stopped", "task_id": task_id, "moved_to": new_status} - -class MoveRequest(BaseModel): - status: str - agent: str = None - reason: str = None # Required for Review/Blocked transitions - -@app.post("/api/tasks/{task_id}/move") -async def move_task(task_id: int, status: str = None, agent: str = None, reason: str = None): - """Quick move task to a new status with workflow rules.""" - now = datetime.now().isoformat() - - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - task = dict(row) - old_status = task["status"] - - # RULE: Only User (human) can move to Done - if status == "Done" and agent != "User": - raise HTTPException(status_code=403, detail="Only User can move tasks to Done") - - # Update status - conn.execute( - "UPDATE tasks SET status = ?, updated_at = ? 
WHERE id = ?", - (status, now, task_id) - ) - conn.commit() - log_activity(task_id, "moved", agent, f"Moved to {status}") - - # AUTO-CREATE ACTION ITEMS based on transition - action_item = None - - # Moving to Review → create completion action item - if status == "Review" and old_status != "Review": - content = reason or f"Ready for review: {task['title']}" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or task["agent"], content, "completion", now) - ) - conn.commit() - action_item = { - "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], - "content": content, "item_type": "completion", "resolved": 0, "created_at": now - } - - # Moving to Blocked → create blocker action item - if status == "Blocked" and old_status != "Blocked": - content = reason or f"Blocked: {task['title']} - reason not specified" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or task["agent"], content, "blocker", now) - ) - conn.commit() - action_item = { - "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], - "content": content, "item_type": "blocker", "resolved": 0, "created_at": now - } - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast updates - await manager.broadcast({"type": "task_updated", "task": result}) - if action_item: - await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": action_item}) - - # CLEANUP: When moving to Done, clear the agent session AND working indicator - session_cleared = False - if status == "Done": - # Always clear working_agent when task is Done - with get_db() as conn: - conn.execute( - "UPDATE tasks SET working_agent = NULL WHERE id = ?", - (task_id,) - ) - conn.commit() - await manager.broadcast({"type": "work_stopped", "task_id": task_id}) - - session_key = get_task_session(task_id) - if session_key: - # Notify the agent that the task is complete - await send_to_agent_session(session_key, - f"✅ **Task #{task_id} marked as Done by User.**\n\nYour work is complete. This session will now end. Thank you!") - # Clear the session from the database - set_task_session(task_id, None) - session_cleared = True - print(f"🧹 Cleared agent session for task #{task_id}") - - return {"status": "moved", "new_status": status, "action_item_created": action_item is not None, "session_cleared": session_cleared} - -# ============================================================================= -# COMMENTS -# ============================================================================= - -class CommentCreate(BaseModel): - agent: str - content: str - - @field_validator('content') - @classmethod - def validate_content_size(cls, v): - # Limit content to 10MB (base64 images can be large) - if len(v) > MAX_ATTACHMENT_SIZE_BYTES: - raise ValueError(f'Content exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') - return v - - @field_validator('agent') - @classmethod - def validate_agent(cls, v): - if len(v) > 100: - raise ValueError('Agent name too long') - return v - -@app.get("/api/tasks/{task_id}/comments") -def get_comments(task_id: int): - """Get comments for a task.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM comments WHERE task_id = ? 
ORDER BY created_at ASC", - (task_id,) - ).fetchall() - return [dict(row) for row in rows] - -class ImageUpload(BaseModel): - data: str # base64 data URL - filename: Optional[str] = "image" - -@app.post("/api/upload/image") -async def upload_image(image: ImageUpload): - """Upload a base64 image and return the file path.""" - import base64 as b64_module - import uuid - - attachments_dir = DATA_DIR / "attachments" - attachments_dir.mkdir(exist_ok=True) - - try: - # Extract base64 data from data URL - data = image.data - if data.startswith("data:") and ";base64," in data: - # Get mime type and base64 content - header, b64_content = data.split(",", 1) - mime_type = header.split(":")[1].split(";")[0] # e.g., "image/png" - ext = mime_type.split("/")[1] if "/" in mime_type else "png" - else: - b64_content = data - ext = "png" - - if ext not in ["png", "jpg", "jpeg", "gif", "webp"]: - ext = "png" - - # Generate unique filename - img_filename = f"{uuid.uuid4().hex[:8]}_{image.filename or 'image'}" - if not img_filename.endswith(f".{ext}"): - img_filename = f"{img_filename}.{ext}" - - img_path = attachments_dir / img_filename - - # Write image file - with open(img_path, "wb") as f: - f.write(b64_module.b64decode(b64_content)) - - return {"path": f"/app/data/attachments/{img_filename}", "filename": img_filename} - except Exception as e: - raise HTTPException(status_code=400, detail=f"Failed to save image: {e}") - -@app.post("/api/tasks/{task_id}/comments") -async def add_comment(task_id: int, comment: CommentCreate): - """Add a comment to a task.""" - now = datetime.now().isoformat() - task_title = "" - task_status = "" - agent_session = None - - with get_db() as conn: - # Verify task exists - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - task_title = row["title"] - task_status = row["status"] - agent_session = row["agent_session_key"] if "agent_session_key" in row.keys() else None - - cursor = conn.execute( - "INSERT INTO comments (task_id, agent, content, created_at) VALUES (?, ?, ?, ?)", - (task_id, comment.agent, comment.content, now) - ) - conn.commit() - - result = { - "id": cursor.lastrowid, - "task_id": task_id, - "agent": comment.agent, - "content": comment.content, - "created_at": now - } - - # Auto-clear working_agent when an agent (not User) posts a comment - # This ensures the "thinking" indicator clears when agent responds - working_agent_cleared = None - if comment.agent and comment.agent != "User": - task_row = conn.execute("SELECT working_agent FROM tasks WHERE id = ?", (task_id,)).fetchone() - if task_row and task_row["working_agent"] == comment.agent: - conn.execute( - "UPDATE tasks SET working_agent = NULL, updated_at = ? 
WHERE id = ?", - (now, task_id) - ) - conn.commit() - working_agent_cleared = comment.agent - - # Broadcast to all clients - await manager.broadcast({"type": "comment_added", "task_id": task_id, "comment": result}) - - # If working agent was cleared, broadcast work_stopped event - if working_agent_cleared: - await manager.broadcast({"type": "work_stopped", "task_id": task_id, "agent": working_agent_cleared}) - - # Check for @mentions in the comment and spawn mentioned agents - mentions = MENTION_PATTERN.findall(comment.content) - if mentions: - # Get task description and previous context for the spawned agent - task_description = "" - previous_context = "" - with get_db() as conn: - task_row = conn.execute("SELECT description FROM tasks WHERE id = ?", (task_id,)).fetchone() - task_description = task_row["description"] if task_row else "" - - # Get last few comments for context (excluding the one that just triggered this) - comment_rows = conn.execute( - "SELECT agent, content FROM comments WHERE task_id = ? AND id != ? ORDER BY created_at DESC LIMIT 5", - (task_id, result["id"]) - ).fetchall() - if comment_rows: - previous_context = "\n".join([f"**{r['agent']}:** {r['content'][:500]}" for r in reversed(comment_rows)]) - - for mentioned_agent in set(mentions): # dedupe mentions - # Normalize case to match AGENT_TO_OPENCLAW_ID keys - matched_agent = None - for agent_name in AGENT_TO_OPENCLAW_ID.keys(): - if agent_name.lower() == mentioned_agent.lower(): - matched_agent = agent_name - break - - if matched_agent and matched_agent != comment.agent: # Don't spawn self - agent_id = AGENT_TO_OPENCLAW_ID.get(matched_agent) - if agent_id: # All agents including main can be spawned now - # Spawn the mentioned agent to respond - await spawn_mentioned_agent( - task_id=task_id, - task_title=task_title, - task_description=task_description, - mentioned_agent=matched_agent, - mentioner=comment.agent, - comment_content=comment.content, - previous_context=previous_context - ) - print(f"📢 Spawned {matched_agent} for mention in task #{task_id}") - - # If this is from User and task is active, try to reach the assigned agent - # BUT only if no explicit @mentions (if User tagged someone specific, don't auto-notify assignee) - if comment.agent == "User" and task_status in ["In Progress", "Review"] and not mentions: - # Get the assigned agent for this task - with get_db() as conn: - row = conn.execute("SELECT agent FROM tasks WHERE id = ?", (task_id,)).fetchone() - assigned_agent = row["agent"] if row else None - - if assigned_agent and assigned_agent in AGENT_TO_OPENCLAW_ID and assigned_agent != "User": - # Get previous conversation context (last few comments) - previous_comments = [] - with get_db() as conn: - rows = conn.execute( - "SELECT agent, content FROM comments WHERE task_id = ? 
ORDER BY created_at DESC LIMIT 5", - (task_id,) - ).fetchall() - previous_comments = [{"agent": r["agent"], "content": r["content"][:500]} for r in reversed(rows)] - - context = "\n".join([f"**{c['agent']}:** {c['content']}" for c in previous_comments[:-1]]) # Exclude current comment - - # Try to send to existing session first - sent = False - if agent_session: - message = f"""💬 **User replied on Task #{task_id}:** - -{comment.content} - ---- -Respond by posting a comment to the task.""" - sent = await send_to_agent_session(agent_session, message) - - if not sent: - # Session ended - spawn a new one with context - print(f"🔄 Session ended, spawning follow-up for task #{task_id}") - await spawn_followup_session( - task_id=task_id, - task_title=task_title, - agent_name=assigned_agent, - previous_context=context, - new_message=comment.content - ) - elif comment.agent not in ["System", "User"] + list(AGENT_TO_OPENCLAW_ID.keys()): - # Notify OPENCLAW for other comments - await notify_OPENCLAW(task_id, task_title, comment.agent, comment.content) - - return result - - -@app.delete("/api/tasks/{task_id}/comments/{comment_id}") -async def delete_comment(task_id: int, comment_id: int): - """Delete a comment from a task (for cleaning up context or removing secrets).""" - with get_db() as conn: - # Verify comment exists and belongs to this task - row = conn.execute( - "SELECT id FROM comments WHERE id = ? AND task_id = ?", - (comment_id, task_id) - ).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Comment not found") - - conn.execute("DELETE FROM comments WHERE id = ?", (comment_id,)) - conn.commit() - - # Broadcast deletion to all clients - await manager.broadcast({ - "type": "comment_deleted", - "task_id": task_id, - "comment_id": comment_id - }) - - return {"status": "deleted", "comment_id": comment_id} - - -# ============================================================================= -# ACTION ITEMS (Questions, Notifications, Blockers) -# ============================================================================= - -class ActionItemCreate(BaseModel): - agent: str - content: str - item_type: str = "question" # question, completion, blocker - comment_id: Optional[int] = None - -@app.get("/api/tasks/{task_id}/action-items") -def get_action_items(task_id: int, resolved: bool = False, archived: bool = False): - """Get action items for a task. By default excludes archived items.""" - with get_db() as conn: - if archived: - # Only return archived items - rows = conn.execute( - "SELECT * FROM action_items WHERE task_id = ? AND archived = 1 ORDER BY created_at ASC", - (task_id,) - ).fetchall() - else: - # Return non-archived items filtered by resolved status - rows = conn.execute( - "SELECT * FROM action_items WHERE task_id = ? AND resolved = ? 
AND archived = 0 ORDER BY created_at ASC", - (task_id, 1 if resolved else 0) - ).fetchall() - return [dict(row) for row in rows] - -@app.post("/api/tasks/{task_id}/action-items") -async def add_action_item(task_id: int, item: ActionItemCreate): - """Add an action item to a task.""" - now = datetime.now().isoformat() - with get_db() as conn: - # Verify task exists - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - cursor = conn.execute( - "INSERT INTO action_items (task_id, comment_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?, ?)", - (task_id, item.comment_id, item.agent, item.content, item.item_type, now) - ) - conn.commit() - - result = { - "id": cursor.lastrowid, - "task_id": task_id, - "comment_id": item.comment_id, - "agent": item.agent, - "content": item.content, - "item_type": item.item_type, - "resolved": 0, - "created_at": now, - "resolved_at": None - } - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": result}) - - return result - -@app.post("/api/action-items/{item_id}/resolve") -async def resolve_action_item(item_id: int): - """Resolve an action item.""" - now = datetime.now().isoformat() - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET resolved = 1, resolved_at = ? WHERE id = ?", - (now, item_id) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_resolved", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - - -@app.post("/api/action-items/{item_id}/unresolve") -async def unresolve_action_item(item_id: int): - """Unresolve an action item (undo accidental resolve).""" - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET resolved = 0, resolved_at = NULL WHERE id = ?", - (item_id,) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_unresolved", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - - -@app.post("/api/action-items/{item_id}/archive") -async def archive_action_item(item_id: int): - """Archive a resolved action item to hide it from main view.""" - now = datetime.now().isoformat() - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET archived = 1 WHERE id = ?", - (item_id,) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_archived", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - - -@app.post("/api/action-items/{item_id}/unarchive") -async def unarchive_action_item(item_id: int): - """Unarchive an action item to show it in main view again.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - 
raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET archived = 0 WHERE id = ?", - (item_id,) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_unarchived", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - -@app.delete("/api/action-items/{item_id}") -async def delete_action_item(item_id: int): - """Delete an action item.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - task_id = row["task_id"] - conn.execute("DELETE FROM action_items WHERE id = ?", (item_id,)) - conn.commit() - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_deleted", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - -# ============================================================================= -# ACTIVITY LOG -# ============================================================================= - -@app.get("/api/activity") -def get_activity(limit: int = 50): - """Get recent activity.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM activity_log ORDER BY timestamp DESC LIMIT ?", - (limit,) - ).fetchall() - return [dict(row) for row in rows] - - -# ============================================================================= -# JARVIS DIRECT CHAT (Command Bar Channel) -# ============================================================================= - -class JarvisMessage(BaseModel): - message: str - session: str = "main" # Which session to send to - attachments: Optional[List[dict]] = None # [{type: "image/png", data: "base64...", filename: "..."}] - - @field_validator('message') - @classmethod - def validate_message_size(cls, v): - if len(v) > MAX_ATTACHMENT_SIZE_BYTES: - raise ValueError(f'Message exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') - return v - -# Chat history now persisted in SQLite (no more in-memory loss on refresh) - -# ============================================================================= -# OPENCLAW SESSIONS API -# ============================================================================= - -@app.get("/api/sessions") -async def list_sessions(): - """Proxy to OpenClaw sessions_list to get active sessions.""" - if not OPENCLAW_ENABLED: - return {"sessions": [], "error": "OpenClaw integration not enabled"} - - try: - async with httpx.AsyncClient(timeout=10.0) as client: - payload = { - "tool": "sessions_list", - "args": { - "limit": 20, - "messageLimit": 0 - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - # Response is in result.content[0].text as JSON string - inner_result = result.get("result", {}) - content = inner_result.get("content", []) - if content and len(content) > 0: - text_content = content[0].get("text", "{}") - sessions_data = json.loads(text_content) - else: - sessions_data = inner_result - sessions = sessions_data.get("sessions", []) - - # Format for frontend - formatted = [] - for s in sessions: - key = s.get("key", "") - session_label = s.get("label", "") # Label from OpenClaw - display = 
s.get("displayName", key) - - # Use OpenClaw's label - if key == "main" or key == "agent:main:main": - label = "🛡️ Jarvis (Main)" - elif session_label: - # Use OpenClaw's label if available - label = f"🤖 {session_label}" - elif "subagent" in key: - # Subagent without label - use short ID - short_id = key.split(":")[-1][:8] if ":" in key else key[:8] - label = f"🤖 Session {short_id}" - elif key.startswith("agent:"): - parts = key.split(":") - agent_name = parts[1] if len(parts) > 1 else key - label = f"🤖 {agent_name.title()}" - else: - label = display - - formatted.append({ - "key": key, - "label": label, - "channel": s.get("channel", ""), - "model": s.get("model", ""), - "updatedAt": s.get("updatedAt", 0) - }) - - # Filter out deleted sessions and cleanup stale entries - openclaw_keys = set(s["key"] for s in formatted) - - with get_db() as conn: - deleted_rows = conn.execute("SELECT session_key FROM deleted_sessions").fetchall() - deleted_keys = set(row["session_key"] for row in deleted_rows) - - # Cleanup: remove deleted_sessions entries that are no longer in OpenClaw - # (OpenClaw has already removed them, so we don't need to track them anymore) - orphaned_keys = deleted_keys - openclaw_keys - if orphaned_keys: - placeholders = ",".join("?" * len(orphaned_keys)) - conn.execute(f"DELETE FROM deleted_sessions WHERE session_key IN ({placeholders})", - list(orphaned_keys)) - conn.commit() - - formatted = [s for s in formatted if s["key"] not in deleted_keys] - - # Sort: main first, then by updatedAt - formatted.sort(key=lambda x: (0 if "main" in x["key"].lower() else 1, -x.get("updatedAt", 0))) - return {"sessions": formatted} - - return {"sessions": [], "error": f"Failed to fetch sessions: {response.status_code}"} - except Exception as e: - print(f"Error fetching sessions: {e}") - return {"sessions": [], "error": str(e)} - - -class SessionCreate(BaseModel): - label: str = None - agentId: str = "main" - task: str = "New session started from Task Board. Awaiting instructions." 
- - -@app.post("/api/sessions/create") -async def create_session(req: SessionCreate): - """Create a new OpenClaw session via sessions_spawn.""" - if not OPENCLAW_ENABLED: - return {"success": False, "error": "OpenClaw integration not enabled"} - - try: - async with httpx.AsyncClient(timeout=30.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": req.agentId, - "task": req.task, - "label": req.label or f"taskboard-{datetime.now().strftime('%H%M%S')}", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - return {"success": True, "result": result.get("result", {})} - - return {"success": False, "error": f"Failed: {response.status_code}"} - except Exception as e: - print(f"Error creating session: {e}") - return {"success": False, "error": str(e)} - - -@app.post("/api/sessions/{session_key}/stop") -async def stop_session(session_key: str): - """Stop/abort a running session.""" - if not OPENCLAW_ENABLED: - return {"success": False, "error": "OpenClaw integration not enabled"} - - try: - # Use the gateway's abort mechanism - async with httpx.AsyncClient(timeout=10.0) as client: - # Send an abort signal via sessions_send with a special abort message - payload = { - "tool": "sessions_send", - "args": { - "sessionKey": session_key, - "message": "SYSTEM: ABORT - User requested stop from Task Board" - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - - # First try to send abort message - await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - # Also try the direct abort endpoint if available - try: - abort_response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/api/sessions/{session_key}/abort", - headers=headers - ) - if abort_response.status_code == 200: - return {"success": True, "message": f"Stopped session: {session_key}"} - except: - pass - - return {"success": True, "message": f"Stop signal sent to: {session_key}"} - except Exception as e: - print(f"Error stopping session: {e}") - return {"success": False, "error": str(e)} - - -@app.post("/api/sessions/stop-all") -async def stop_all_sessions(): - """Emergency stop all non-main sessions.""" - if not OPENCLAW_ENABLED: - return {"success": False, "error": "OpenClaw integration not enabled"} - - stopped = [] - errors = [] - - try: - # First get all sessions - async with httpx.AsyncClient(timeout=10.0) as client: - payload = { - "tool": "sessions_list", - "args": {"limit": 50, "messageLimit": 0} - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - inner_result = result.get("result", {}) - content = inner_result.get("content", []) - if content and len(content) > 0: - text_content = content[0].get("text", "{}") - sessions_data = json.loads(text_content) - else: - sessions_data = inner_result - - sessions = sessions_data.get("sessions", []) - - # Stop each non-main session - for s in sessions: - key = s.get("key", "") - if key and "main" not in key.lower(): - try: - stop_result = await 
stop_session(key) - if stop_result.get("success"): - stopped.append(key) - else: - errors.append(key) - except: - errors.append(key) - - return { - "success": True, - "stopped": stopped, - "errors": errors, - "message": f"Stopped {len(stopped)} sessions" - } - except Exception as e: - print(f"Error stopping all sessions: {e}") - return {"success": False, "error": str(e)} - - -@app.delete("/api/sessions/{session_key}") -async def delete_session(session_key: str): - """Close/delete a session - removes from OpenClaw's session store.""" - if not OPENCLAW_ENABLED: - return {"success": False, "error": "OpenClaw integration not enabled"} - - # Send stop signal first - await stop_session(session_key) - - now = datetime.now().isoformat() - - # Clear taskboard's local chat history - with get_db() as conn: - conn.execute("DELETE FROM chat_messages WHERE session_key = ?", (session_key,)) - conn.execute( - "INSERT OR REPLACE INTO deleted_sessions (session_key, deleted_at) VALUES (?, ?)", - (session_key, now) - ) - conn.commit() - - # Delete from OpenClaw's session store - openclaw_deleted = False - try: - # Parse session key to get agent id (format: agent::) - parts = session_key.split(":") - if len(parts) >= 2 and parts[0] == "agent": - agent_id = parts[1] # e.g., "main" - - # Path to OpenClaw session store (use env var if in Docker, fallback to home dir) - import os - openclaw_home = os.environ.get("OPENCLAW_DATA_PATH", os.path.expanduser("~/.openclaw")) - sessions_file = os.path.join(openclaw_home, "agents", agent_id, "sessions", "sessions.json") - - if os.path.exists(sessions_file): - import json - with open(sessions_file, 'r', encoding='utf-8') as f: - sessions_data = json.load(f) - - # Check if session exists and get its sessionId for transcript deletion - session_id = None - if session_key in sessions_data: - session_id = sessions_data[session_key].get("sessionId") - del sessions_data[session_key] - - # Write back - with open(sessions_file, 'w', encoding='utf-8') as f: - json.dump(sessions_data, f, indent=2) - - openclaw_deleted = True - print(f"Deleted session {session_key} from OpenClaw store") - - # Also delete transcript file if it exists - if session_id: - transcript_file = os.path.join(openclaw_home, "agents", agent_id, "sessions", f"{session_id}.jsonl") - if os.path.exists(transcript_file): - os.remove(transcript_file) - print(f"Deleted transcript {transcript_file}") - except Exception as e: - print(f"Warning: Could not delete from OpenClaw store: {e}") - - # Broadcast session deletion to all clients for real-time UI update - await manager.broadcast({ - "type": "session_deleted", - "session_key": session_key - }) - - return { - "success": True, - "message": f"Deleted session: {session_key}", - "openclaw_deleted": openclaw_deleted - } - - -@app.get("/api/jarvis/history") -def get_chat_history(limit: int = 100, session: str = "main"): - """Get command bar chat history from database, filtered by session.""" - with get_db() as conn: - rows = conn.execute( - "SELECT id, session_key, role, content, attachments, created_at FROM chat_messages WHERE session_key = ? 
ORDER BY id DESC LIMIT ?", - (session, limit) - ).fetchall() - # Return in chronological order - messages = [] - for row in reversed(rows): - msg = { - "id": row["id"], - "session_key": row["session_key"], - "role": row["role"], - "content": row["content"], - "timestamp": row["created_at"] - } - if row["attachments"]: - msg["attachments"] = json.loads(row["attachments"]) - messages.append(msg) - return {"history": messages, "session": session} - -@app.post("/api/jarvis/chat") -async def chat_with_jarvis(msg: JarvisMessage): - """Send a message to Jarvis via sessions_send (synchronous, waits for response).""" - if not OPENCLAW_ENABLED: - return {"sent": False, "error": "OpenClaw integration not enabled."} - - now = datetime.now().isoformat() - - # Build the message content with taskboard context - message_content = f"System: [TASKBOARD_CHAT] User says: {msg.message}\n\nRespond naturally." - - # Include attachment data in the message for the agent to process - if msg.attachments: - import base64 as b64_module - import uuid - - # Create attachments directory if needed - attachments_dir = DATA_DIR / "attachments" - attachments_dir.mkdir(exist_ok=True) - - for att in msg.attachments: - att_type = att.get("type", "") - att_data = att.get("data", "") - att_filename = att.get("filename", "file") - - if att_type.startswith("image/") and att_data: - # Save image to file so agent can read it with Read tool - try: - # Extract base64 data from data URL - if att_data.startswith("data:") and ";base64," in att_data: - b64_content = att_data.split(",", 1)[1] - else: - b64_content = att_data - - # Determine file extension - ext = att_type.split("/")[1].split(";")[0] # e.g., "png" from "image/png" - if ext not in ["png", "jpg", "jpeg", "gif", "webp"]: - ext = "png" - - # Generate unique filename - img_filename = f"{uuid.uuid4().hex[:8]}_{att_filename or 'image'}" - if not img_filename.endswith(f".{ext}"): - img_filename = f"{img_filename}.{ext}" - - img_path = attachments_dir / img_filename - - # Write image file - with open(img_path, "wb") as f: - f.write(b64_module.b64decode(b64_content)) - - # Include path for agent to read - message_content += f"\n\n📷 **Image attached:** `/app/data/attachments/{img_filename}`\nUse the Read tool to view this image." 
- except Exception as e: - print(f"Failed to save image attachment: {e}") - message_content += f"\n\n[Image attachment failed to save: {e}]" - elif att_data: - # For text files, try to extract and embed the content - if att_data.startswith("data:") and ";base64," in att_data: - try: - import base64 - # Extract base64 part after the comma - b64_content = att_data.split(",", 1)[1] - decoded = base64.b64decode(b64_content).decode("utf-8", errors="replace") - message_content += f"\n\n**📎 Attached file: {att_filename}**\n```\n{decoded}\n```" - except Exception as e: - message_content += f"\n\n[Attached File: {att_filename} (decode error: {e})]" - else: - message_content += f"\n\n[Attached File: {att_filename}]" - - # Normalize session key - session_key = msg.session or "main" - - # Store user message in database - attachments_json = json.dumps(msg.attachments) if msg.attachments else None - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "user", msg.message, attachments_json, now) - ) - conn.commit() - user_msg_id = cursor.lastrowid - - user_msg = { - "id": user_msg_id, - "session_key": session_key, - "role": "user", - "content": msg.message, - "timestamp": now, - "attachments": msg.attachments - } - - # Broadcast user message to all clients (so other tabs see it) - await manager.broadcast({ - "type": "command_bar_message", - "message": user_msg - }) - - try: - # Use sessions_send via tools/invoke - this is synchronous and waits for response - async with httpx.AsyncClient(timeout=120.0) as client: - payload = { - "tool": "sessions_send", - "args": { - "message": message_content, - "sessionKey": session_key, # Use selected session - "timeoutSeconds": 90 - } - } - headers = { - "Authorization": f"Bearer {OPENCLAW_TOKEN}", - "Content-Type": "application/json" - } - - response = await client.post( - f"{OPENCLAW_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - - # /tools/invoke returns { ok: true, result: { content, details: { reply, ... 
} } } - inner = result.get("result", {}) - - if isinstance(inner, dict): - # Response is in inner.details.reply - details = inner.get("details", {}) - assistant_reply = details.get("reply") or inner.get("reply") or inner.get("response") - else: - assistant_reply = str(inner) if inner else None - - # Ensure it's a string - if assistant_reply and not isinstance(assistant_reply, str): - import json as json_module - assistant_reply = json_module.dumps(assistant_reply) if isinstance(assistant_reply, (dict, list)) else str(assistant_reply) - - if assistant_reply: - # Store the response in database - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "assistant", assistant_reply, None, now) - ) - conn.commit() - assistant_msg_id = cursor.lastrowid - - jarvis_msg = { - "id": assistant_msg_id, - "session_key": session_key, - "role": "assistant", - "content": assistant_reply, - "timestamp": datetime.now().isoformat() - } - # Return response directly - frontend adds to history from HTTP response - return {"sent": True, "response": assistant_reply, "session": session_key} - - return {"sent": True, "response": "No response received"} - else: - error_text = response.text[:200] if response.text else f"HTTP {response.status_code}" - return {"sent": False, "error": error_text} - - except Exception as e: - print(f"Error sending to Jarvis: {e}") - return {"sent": False, "error": str(e)} - -class JarvisResponse(BaseModel): - response: str - session: str = "main" # Which session this response is for - - @field_validator('response') - @classmethod - def validate_response_size(cls, v): - if len(v) > 1024 * 1024: # 1MB limit for responses - raise ValueError('Response too large') - return v - -@app.post("/api/jarvis/respond") -async def jarvis_respond(msg: JarvisResponse, _: bool = Depends(verify_api_key)): - """Endpoint for Jarvis to push responses back to the command bar. 
Requires API key.""" - now = datetime.now().isoformat() - session_key = msg.session or "main" - - # Store Jarvis response in database - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "assistant", msg.response, None, now) - ) - conn.commit() - msg_id = cursor.lastrowid - - jarvis_msg = { - "id": msg_id, - "session_key": session_key, - "role": "assistant", - "content": msg.response, - "timestamp": now - } - - # Broadcast to all connected clients - await manager.broadcast({ - "type": "command_bar_message", - "message": jarvis_msg - }) - return {"delivered": True} - -# Legacy endpoint for backwards compatibility -@app.post("/api/molt/chat") -async def chat_with_molt_legacy(msg: JarvisMessage): - """Legacy endpoint - redirects to /api/jarvis/chat.""" - return await chat_with_jarvis(msg) - -@app.post("/api/molt/respond") -async def jarvis_respond_legacy(msg: JarvisResponse, _: bool = Depends(verify_api_key)): - """Legacy endpoint - redirects to /api/jarvis/respond.""" - return await jarvis_respond(msg, _) - - -if __name__ == "__main__": - import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8080) diff --git a/app.py.bak b/app.py.bak deleted file mode 100644 index 523faee..0000000 --- a/app.py.bak +++ /dev/null @@ -1,1976 +0,0 @@ -""" -RIZQ Task Board - FastAPI Backend -Simple, fast, full agent control, LIVE updates -""" - -import sqlite3 -import json -import asyncio -import re -import httpx -from datetime import datetime -from pathlib import Path -from typing import Optional, List, Set -from contextlib import contextmanager - -from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Depends, Header, Request -from fastapi.staticfiles import StaticFiles -from fastapi.responses import FileResponse -from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel, field_validator - -# ============================================================================= -# CONFIG -# ============================================================================= -import os -import secrets -import hashlib - -DATA_DIR = Path(__file__).parent / "data" -DATA_DIR.mkdir(exist_ok=True) -DB_PATH = DATA_DIR / "tasks.db" -STATIC_PATH = Path(__file__).parent / "static" - -# ============================================================================= -# BRANDING (configurable via environment variables) -# ============================================================================= -MAIN_AGENT_NAME = os.getenv("MAIN_AGENT_NAME", "Jarvis") -MAIN_AGENT_EMOJI = os.getenv("MAIN_AGENT_EMOJI", "\U0001F6E1") -HUMAN_NAME = os.getenv("HUMAN_NAME", "User") -HUMAN_SUPERVISOR_LABEL = os.getenv("HUMAN_SUPERVISOR_LABEL", "User") -BOARD_TITLE = os.getenv("BOARD_TITLE", "Task Board") - -AGENTS = [MAIN_AGENT_NAME, "Architect", "Security Auditor", "Code Reviewer", "UX Manager", "User", "Unassigned"] -STATUSES = ["Backlog", "In Progress", "Review", "Done", "Blocked"] -PRIORITIES = ["Critical", "High", "Medium", "Low"] - -# Map task board agent names to Clawdbot agent IDs -# Customize these to match your Clawdbot agent configuration -AGENT_TO_CLAWDBOT_ID = { - MAIN_AGENT_NAME: "main", # Main agent (handles command bar chat) - "Architect": "architect", - "Security Auditor": "security-auditor", - "Code Reviewer": "code-reviewer", - "UX Manager": "ux-manager", -} - -# Alias for backward compatibility -AGENT_TO_MOLTBOT_ID = AGENT_TO_CLAWDBOT_ID - -# Build mention regex dynamically 
from agent names (including main agent now) -MENTIONABLE_AGENTS = list(AGENT_TO_CLAWDBOT_ID.keys()) -MENTION_PATTERN = re.compile(r'@(' + '|'.join(re.escape(a) for a in MENTIONABLE_AGENTS) + r')', re.IGNORECASE) - -# Security: Load secrets from environment variables -MOLTBOT_GATEWAY_URL = os.getenv("MOLTBOT_GATEWAY_URL", "http://host.docker.internal:18789") -MOLTBOT_TOKEN = os.getenv("MOLTBOT_TOKEN", "") -TASKBOARD_API_KEY = os.getenv("TASKBOARD_API_KEY", "") -MOLTBOT_ENABLED = bool(MOLTBOT_TOKEN) - -# Project configuration (customize in .env) -PROJECT_NAME = os.getenv("PROJECT_NAME", "My Project") -COMPANY_NAME = os.getenv("COMPANY_NAME", "Acme Corp") -COMPANY_CONTEXT = os.getenv("COMPANY_CONTEXT", "software development") -ALLOWED_PATHS = os.getenv("ALLOWED_PATHS", "/workspace, /project") -COMPLIANCE_FRAMEWORKS = os.getenv("COMPLIANCE_FRAMEWORKS", "your security requirements") - -# Warn if running without security -if not TASKBOARD_API_KEY: - print("⚠️ WARNING: TASKBOARD_API_KEY not set. API authentication disabled!") -if not MOLTBOT_TOKEN: - print("⚠️ WARNING: MOLTBOT_TOKEN not set. MOLTBOT integration disabled!") - -# File upload limits -MAX_ATTACHMENT_SIZE_MB = 10 -MAX_ATTACHMENT_SIZE_BYTES = MAX_ATTACHMENT_SIZE_MB * 1024 * 1024 - -# ============================================================================= -# SECURITY -# ============================================================================= - -def verify_api_key(authorization: str = Header(None), x_api_key: str = Header(None)): - """Verify API key from Authorization header or X-API-Key header.""" - if not TASKBOARD_API_KEY: - return True # Auth disabled if no key configured - - # Check Authorization: Bearer - if authorization: - if authorization.startswith("Bearer "): - token = authorization[7:] - if secrets.compare_digest(token, TASKBOARD_API_KEY): - return True - - # Check X-API-Key header - if x_api_key: - if secrets.compare_digest(x_api_key, TASKBOARD_API_KEY): - return True - - raise HTTPException(status_code=401, detail="Invalid or missing API key") - -def verify_internal_only(request: Request): - """Only allow requests from localhost/internal sources.""" - client_host = request.client.host if request.client else None - allowed_hosts = ["127.0.0.1", "localhost", "::1", "172.17.0.1", "host.docker.internal"] - - # Also allow Docker internal IPs (172.x.x.x) - if client_host and (client_host in allowed_hosts or client_host.startswith("172.")): - return True - - # If API key is provided, allow from anywhere - if TASKBOARD_API_KEY: - return True - - raise HTTPException(status_code=403, detail="Access denied") - -async def notify_MOLTBOT(task_id: int, task_title: str, comment_agent: str, comment_content: str): - """Send webhook to Clawdbot when a comment needs attention.""" - if not MOLTBOT_ENABLED or comment_agent == MAIN_AGENT_NAME: - return # Don't notify for main agent's own comments - - try: - async with httpx.AsyncClient(timeout=5.0) as client: - # Use MOLTBOT's cron wake endpoint - payload = { - "action": "wake", - "text": f"💬 Task Board: New comment on #{task_id} ({task_title}) from {comment_agent}:\n\n{comment_content[:200]}{'...' 
if len(comment_content) > 200 else ''}\n\nCheck and respond: http://localhost:8080" - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - await client.post(f"{MOLTBOT_GATEWAY_URL}/api/cron/wake", json=payload, headers=headers) - print(f"Notified MOLTBOT about comment from {comment_agent}") - except Exception as e: - print(f"Webhook to MOLTBOT failed: {e}") - - -async def send_to_agent_session(session_key: str, message: str) -> bool: - """Send a follow-up message to an active agent session.""" - if not MOLTBOT_ENABLED or not session_key: - return False - - try: - async with httpx.AsyncClient(timeout=30.0) as client: - payload = { - "tool": "sessions_send", - "args": { - "sessionKey": session_key, - "message": message - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - print(f"✅ Sent message to session {session_key}") - return True - else: - print(f"❌ Failed to send to session: {response.text}") - return False - except Exception as e: - print(f"❌ Failed to send to agent session: {e}") - return False - - -def get_task_session(task_id: int) -> Optional[str]: - """Get the active agent session key for a task.""" - with get_db() as conn: - row = conn.execute("SELECT agent_session_key FROM tasks WHERE id = ?", (task_id,)).fetchone() - return row["agent_session_key"] if row and row["agent_session_key"] else None - - -async def spawn_followup_session(task_id: int, task_title: str, agent_name: str, previous_context: str, new_message: str): - """Spawn a follow-up session for an agent with conversation context.""" - if not MOLTBOT_ENABLED: - return None - - agent_id = AGENT_TO_MOLTBOT_ID.get(agent_name) - if not agent_id: - return None - # Main agent can spawn follow-up sessions too - - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - - followup_prompt = f"""# Follow-up on Task #{task_id}: {task_title} - -You previously worked on this task and moved it to Review. User has a follow-up question. - -## Previous Conversation: -{previous_context if previous_context else "(No previous messages)"} - -## User's New Message: -{new_message} - -## Your Role: -{system_prompt} - -## Instructions: -1. Read the context and User's question -2. Respond helpfully by posting a comment: POST http://localhost:8080/api/tasks/{task_id}/comments -3. Keep your response focused on what User asked - -Respond now. 
-""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": followup_prompt, - "label": f"task-{task_id}-followup", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - spawn_info = result.get("result", {}) - session_key = spawn_info.get("childSessionKey", None) - if session_key: - set_task_session(task_id, session_key) - print(f"✅ Spawned follow-up session for {agent_name} on task #{task_id}") - return result - else: - print(f"❌ Failed to spawn follow-up: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn follow-up session: {e}") - return None - - -def set_task_session(task_id: int, session_key: Optional[str]): - """Set or clear the agent session key for a task.""" - with get_db() as conn: - conn.execute( - "UPDATE tasks SET agent_session_key = ?, updated_at = ? WHERE id = ?", - (session_key, datetime.now().isoformat(), task_id) - ) - conn.commit() - - -async def spawn_mentioned_agent(task_id: int, task_title: str, task_description: str, - mentioned_agent: str, mentioner: str, comment_content: str, - previous_context: str = ""): - """Spawn a session for an @mentioned agent to contribute to a task they don't own. - - For the main agent (Jarvis), sends to main session instead of spawning. - """ - if not MOLTBOT_ENABLED: - return None - - agent_id = AGENT_TO_CLAWDBOT_ID.get(mentioned_agent) - if not agent_id: - return None - - # All agents (including main) now spawn subagent sessions - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - - mention_prompt = f"""# You've Been Tagged: Task #{task_id} - -**{mentioner}** mentioned you on a task and needs your input. - -## Task: {task_title} -{task_description or '(No description)'} - -## What {mentioner} Said: -{comment_content} - -## Previous Conversation: -{previous_context if previous_context else "(No prior comments)"} - -## Your Role: -{system_prompt} - -## Instructions: -1. Call start-work API: POST http://localhost:8080/api/tasks/{task_id}/start-work?agent={mentioned_agent} -2. Review the task from YOUR perspective ({mentioned_agent}) -3. Post your findings/response as a comment: POST http://localhost:8080/api/tasks/{task_id}/comments -4. Call stop-work API: POST http://localhost:8080/api/tasks/{task_id}/stop-work - -**Note:** You are NOT the assigned owner of this task. You're providing your expertise because you were tagged. -Do NOT move the task to a different status — that's the owner's job. - -{AGENT_GUARDRAILS} - -Respond now with your assessment. 
-""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": mention_prompt, - "label": f"task-{task_id}-mention-{agent_id}", - "cleanup": "delete" # Cleanup after since they're just dropping in - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - spawn_info = result.get("result", {}) - session_key = spawn_info.get("childSessionKey", "unknown") - - # Post system comment about the spawn - async with httpx.AsyncClient(timeout=5.0) as comment_client: - await comment_client.post( - f"http://localhost:8080/api/tasks/{task_id}/comments", - json={ - "agent": "System", - "content": f"📢 **{mentioned_agent}** was tagged by {mentioner} and is now reviewing this task." - } - ) - - print(f"✅ Spawned {mentioned_agent} for mention on task #{task_id}") - return result - else: - print(f"❌ Failed to spawn {mentioned_agent} for mention: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn mentioned agent: {e}") - return None - -# Guardrails to inject into every sub-agent task -AGENT_GUARDRAILS = f""" -⚠️ MANDATORY CONSTRAINTS (Approved by User via Task Board assignment): - -FILESYSTEM BOUNDARIES: -- ONLY access: {ALLOWED_PATHS} -- Everything else is FORBIDDEN without explicit authorization - -FORBIDDEN ACTIONS (do not attempt without approval): -- Browser tool (except UX Manager on localhost only) -- git commit (requires safeword from User) -- Any action outside the authorized paths - -WEB_FETCH (requires approval): -- You have web_fetch available but MUST ask User first -- Create an action item (type: question) explaining what URL you need and why -- Wait for User to resolve the action item before fetching -- Only fetch after explicit approval - -COMPLIANCE CONTEXT: -- {COMPANY_NAME}, {COMPANY_CONTEXT} -- {COMPLIANCE_FRAMEWORKS} -- Security over convenience — always - -COMMUNICATION & ESCALATION: -- Post comments on the task card to communicate -- Create action items for questions that need answers (type: question) -- Create action items for blockers (type: blocker) - -ESCALATION CHAIN: -1. {MAIN_AGENT_NAME} (coordinator) monitors your action items and may answer if confident -2. If {MAIN_AGENT_NAME} answers, the item gets resolved and you can proceed -3. If {MAIN_AGENT_NAME} is unsure, they leave it for {HUMAN_SUPERVISOR_LABEL} to review -4. {HUMAN_SUPERVISOR_LABEL} has final authority on all decisions - -TASK BOARD INTEGRATION: -- Use start-work API when beginning: POST http://localhost:8080/api/tasks/{{task_id}}/start-work?agent={{your_name}} -- Post updates as comments: POST http://localhost:8080/api/tasks/{{task_id}}/comments (json: {{"agent": "your_name", "content": "message"}}) -- Create action items for questions: POST http://localhost:8080/api/tasks/{{task_id}}/action-items (json: {{"agent": "your_name", "content": "question", "item_type": "question"}}) -- Move to Review when done: POST http://localhost:8080/api/tasks/{{task_id}}/move?status=Review&agent={{your_name}}&reason=... 
-- Use stop-work API when finished: POST http://localhost:8080/api/tasks/{{task_id}}/stop-work - -REPORT FORMAT: -When complete, post a comment with your findings using this format: -## [Your Role] Report -**Task:** [task title] -**Verdict:** ✅ APPROVED / ⚠️ CONCERNS / 🛑 BLOCKED -### Findings -- [SEVERITY] Issue description -### Summary -[1-2 sentence assessment] -""" - -AGENT_SYSTEM_PROMPTS = { - "main": f"""You are {MAIN_AGENT_NAME}, the primary coordinator for {COMPANY_NAME}. - -Your focus: -- General task implementation and coordination -- Code writing and debugging -- Cross-cutting concerns that don't fit specialist roles -- Synthesizing input from other agents -- Direct implementation work - -Project: {PROJECT_NAME} -You're the hands-on executor. When assigned a task, dig in and get it done.""", - - "architect": f"""You are the Architect for {COMPANY_NAME}. - -Your focus: -- System design and architectural patterns -- Scalability and performance implications -- Technical trade-offs and recommendations -- Integration architecture -- Database design and data modeling - -Project: {PROJECT_NAME} -Be concise. Flag concerns with severity (CRITICAL/HIGH/MEDIUM/LOW).""", - - "security-auditor": f"""You are the Security Auditor for {COMPANY_NAME}. - -Your focus: -- SOC2 Trust Services Criteria (Security, Availability, Confidentiality, Privacy) -- HIPAA compliance (PHI handling, access controls, audit logging) -- CIS Controls benchmarks -- OWASP Top 10 vulnerabilities -- Secure credential storage and handling -- Tenant data isolation (multi-tenant SaaS) - -NON-NEGOTIABLE: Security over convenience. Always. -Rate findings: CRITICAL (blocks deploy) / HIGH / MEDIUM / LOW""", - - "code-reviewer": f"""You are the Code Reviewer for {COMPANY_NAME}. - -Your focus: -- Code quality and best practices -- DRY, SOLID principles -- Error handling and edge cases -- Performance considerations -- Code readability and maintainability -- Test coverage gaps - -Project: {PROJECT_NAME} -Format: MUST FIX / SHOULD FIX / CONSIDER / NICE TO HAVE""", - - "ux-manager": f"""You are the UX Manager for {COMPANY_NAME}. - -Your focus: -- User flow clarity and efficiency -- Error message helpfulness -- Form design and validation feedback -- UI consistency across the platform -- Accessibility basics -- Onboarding experience - -Project: {PROJECT_NAME} - -BROWSER ACCESS (localhost only): -You have browser access to review the app UI. Use it to: -- Take snapshots of pages to analyze layout, spacing, colors -- Check user flows and navigation -- Verify form designs and error states -- Assess overall visual consistency - -ALLOWED URLs (localhost only): -- http://localhost:* (any port) -- http://127.0.0.1:* - -DO NOT navigate to any external URLs. 
Your browser access is strictly for reviewing the local app.""" -} - -async def spawn_agent_session(task_id: int, task_title: str, task_description: str, agent_name: str): - """Spawn a MOLTBOT sub-agent session for a task via tools/invoke API.""" - if not MOLTBOT_ENABLED: - return None - - agent_id = AGENT_TO_MOLTBOT_ID.get(agent_name) - if not agent_id: - return None # Don't spawn for unknown agents - # Note: Main agent (Jarvis) CAN spawn subagents now - no special case - - # Build the task prompt with guardrails - system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") - task_prompt = f"""# Task Assignment from RIZQ Task Board (Approved by {HUMAN_SUPERVISOR_LABEL}) - -**Task #{task_id}:** {task_title} - -**Description:** -{task_description or 'No description provided.'} - -{AGENT_GUARDRAILS} - -## Your Role -{system_prompt} - ---- - -## Instructions -1. Call start-work API: POST http://localhost:8080/api/tasks/{task_id}/start-work?agent={agent_name} -2. Analyze the task thoroughly -3. Post your findings as a comment on the task -4. Move to Review when complete: POST http://localhost:8080/api/tasks/{task_id}/move?status=Review&agent={agent_name}&reason= -5. Call stop-work API: POST http://localhost:8080/api/tasks/{task_id}/stop-work - -## IMPORTANT: Stay Available -After posting your findings, **remain available for follow-up questions**. User may reply with questions or requests for clarification. When you receive a message starting with "💬 **User replied**", respond thoughtfully and post your response as a comment on the task. - -Your session will automatically end when User marks the task as Done. - -Begin now. -""" - - try: - async with httpx.AsyncClient(timeout=60.0) as client: - # Use MOLTBOT's tools/invoke API to spawn sub-agent directly - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": agent_id, - "task": task_prompt, - "label": f"task-{task_id}", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - result = response.json() if response.status_code == 200 else None - if result and result.get("ok"): - print(f"✅ Spawned {agent_name} ({agent_id}) for task #{task_id}") - # Add a comment to the task noting the agent was spawned - spawn_info = result.get("result", {}) - run_id = spawn_info.get("runId", "unknown") - session_key = spawn_info.get("childSessionKey", None) - - # Save session key to database for follow-up messages - if session_key: - set_task_session(task_id, session_key) - - async with httpx.AsyncClient(timeout=5.0) as comment_client: - await comment_client.post( - f"http://localhost:8080/api/tasks/{task_id}/comments", - json={ - "agent": "System", - "content": f"🤖 **{agent_name}** agent spawned automatically.\n\nSession: `{session_key or 'unknown'}`\nRun ID: `{run_id}`\n\n💬 *Reply to this task and the agent will respond.*" - } - ) - return result - else: - print(f"❌ Failed to spawn {agent_name}: {response.text}") - return None - except Exception as e: - print(f"❌ Failed to spawn agent session: {e}") - return None - -# ============================================================================= -# WEBSOCKET MANAGER -# ============================================================================= - -class ConnectionManager: - """Manage WebSocket connections for live updates.""" - - def __init__(self): - self.active_connections: Set[WebSocket] = set() - - async def 
connect(self, websocket: WebSocket): - await websocket.accept() - self.active_connections.add(websocket) - - def disconnect(self, websocket: WebSocket): - self.active_connections.discard(websocket) - - async def broadcast(self, message: dict): - """Send update to all connected clients.""" - dead = set() - for connection in self.active_connections: - try: - await connection.send_json(message) - except: - dead.add(connection) - self.active_connections -= dead - -manager = ConnectionManager() - -# ============================================================================= -# DATABASE -# ============================================================================= - -def init_db(): - """Initialize the database.""" - with get_db() as conn: - conn.execute(""" - CREATE TABLE IF NOT EXISTS tasks ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - title TEXT NOT NULL, - description TEXT DEFAULT '', - status TEXT DEFAULT 'Backlog', - priority TEXT DEFAULT 'Medium', - agent TEXT DEFAULT 'Unassigned', - due_date TEXT, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - board TEXT DEFAULT 'tasks' - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS activity_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER, - action TEXT NOT NULL, - agent TEXT, - details TEXT, - timestamp TEXT NOT NULL - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS comments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER NOT NULL, - agent TEXT NOT NULL, - content TEXT NOT NULL, - created_at TEXT NOT NULL - ) - """) - conn.execute(""" - CREATE TABLE IF NOT EXISTS action_items ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id INTEGER NOT NULL, - comment_id INTEGER, - agent TEXT NOT NULL, - content TEXT NOT NULL, - item_type TEXT DEFAULT 'question', - resolved INTEGER DEFAULT 0, - created_at TEXT NOT NULL, - resolved_at TEXT - ) - """) - # Add working_agent column if it doesn't exist - try: - conn.execute("ALTER TABLE tasks ADD COLUMN working_agent TEXT DEFAULT NULL") - except: - pass # Column already exists - # Add agent_session_key column for persistent agent sessions - try: - conn.execute("ALTER TABLE tasks ADD COLUMN agent_session_key TEXT DEFAULT NULL") - except: - pass # Column already exists - - # Add archived column to action_items - try: - conn.execute("ALTER TABLE action_items ADD COLUMN archived INTEGER DEFAULT 0") - except: - pass # Column already exists - - # Chat messages table for persistent command bar history - conn.execute(""" - CREATE TABLE IF NOT EXISTS chat_messages ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - session_key TEXT DEFAULT 'main', - role TEXT NOT NULL, - content TEXT NOT NULL, - attachments TEXT, - created_at TEXT NOT NULL - ) - """) - # Add session_key column if upgrading from older schema - try: - conn.execute("ALTER TABLE chat_messages ADD COLUMN session_key TEXT DEFAULT 'main'") - except: - pass # Column already exists - - # Deleted sessions table - to filter out from dropdown - conn.execute(""" - CREATE TABLE IF NOT EXISTS deleted_sessions ( - session_key TEXT PRIMARY KEY, - deleted_at TEXT NOT NULL - ) - """) - conn.commit() - -@contextmanager -def get_db(): - """Database connection context manager.""" - conn = sqlite3.connect(DB_PATH) - conn.row_factory = sqlite3.Row - try: - yield conn - finally: - conn.close() - -def log_activity(task_id: int, action: str, agent: str = None, details: str = None): - """Log an activity.""" - with get_db() as conn: - conn.execute( - "INSERT INTO activity_log (task_id, action, agent, details, timestamp) VALUES (?, ?, ?, ?, 
?)", - (task_id, action, agent, details, datetime.now().isoformat()) - ) - conn.commit() - -# ============================================================================= -# MODELS -# ============================================================================= - -class TaskCreate(BaseModel): - title: str - description: str = "" - status: str = "Backlog" - priority: str = "Medium" - agent: str = "Unassigned" - due_date: Optional[str] = None - board: str = "tasks" - source_file: Optional[str] = None - source_ref: Optional[str] = None - -class TaskUpdate(BaseModel): - title: Optional[str] = None - description: Optional[str] = None - status: Optional[str] = None - priority: Optional[str] = None - agent: Optional[str] = None - due_date: Optional[str] = None - source_file: Optional[str] = None - source_ref: Optional[str] = None - -class Task(BaseModel): - id: int - title: str - description: str - status: str - priority: str - agent: str - due_date: Optional[str] - created_at: str - updated_at: str - board: str - source_file: Optional[str] = None - source_ref: Optional[str] = None - working_agent: Optional[str] = None - -# ============================================================================= -# APP -# ============================================================================= - -app = FastAPI(title="RIZQ Task Board", version="1.2.0") - -# Restrict CORS to localhost origins only -ALLOWED_ORIGINS = [ - "http://localhost:8080", - "http://127.0.0.1:8080", - "http://localhost:3000", - "http://127.0.0.1:3000", -] - -app.add_middleware( - CORSMiddleware, - allow_origins=ALLOWED_ORIGINS, - allow_credentials=True, - allow_methods=["GET", "POST", "PATCH", "DELETE"], - allow_headers=["Authorization", "X-API-Key", "Content-Type"], -) - -# Initialize DB on startup -@app.on_event("startup") -def startup(): - init_db() - -# Serve static files -STATIC_PATH.mkdir(exist_ok=True) -app.mount("/static", StaticFiles(directory=STATIC_PATH), name="static") - -@app.get("/") -def read_root(): - """Serve the Kanban UI.""" - return FileResponse(STATIC_PATH / "index.html") - -# ============================================================================= -# WEBSOCKET ENDPOINT -# ============================================================================= - -@app.websocket("/ws") -async def websocket_endpoint(websocket: WebSocket): - """WebSocket for live updates.""" - await manager.connect(websocket) - try: - while True: - # Keep connection alive, wait for messages (ping/pong) - data = await websocket.receive_text() - # Echo back for ping - if data == "ping": - await websocket.send_text("pong") - except WebSocketDisconnect: - manager.disconnect(websocket) - -# ============================================================================= -# CONFIG ENDPOINTS -# ============================================================================= - -@app.get("/api/config") -def get_config(): - """Get board configuration including branding.""" - return { - "agents": AGENTS, - "statuses": STATUSES, - "priorities": PRIORITIES, - "branding": { - "mainAgentName": MAIN_AGENT_NAME, - "mainAgentEmoji": MAIN_AGENT_EMOJI, - "humanName": HUMAN_NAME, - "humanSupervisorLabel": HUMAN_SUPERVISOR_LABEL, - "boardTitle": BOARD_TITLE, - } - } - -# ============================================================================= -# TASK ENDPOINTS -# ============================================================================= - -@app.get("/api/tasks", response_model=List[Task]) -def list_tasks(board: str = "tasks", agent: str = None, status: str 
= None): - """List all tasks with optional filters.""" - with get_db() as conn: - query = "SELECT * FROM tasks WHERE board = ?" - params = [board] - - if agent: - query += " AND agent = ?" - params.append(agent) - if status: - query += " AND status = ?" - params.append(status) - - query += " ORDER BY CASE priority WHEN 'Critical' THEN 1 WHEN 'High' THEN 2 WHEN 'Medium' THEN 3 ELSE 4 END, created_at DESC" - - rows = conn.execute(query, params).fetchall() - return [dict(row) for row in rows] - -@app.get("/api/tasks/{task_id}", response_model=Task) -def get_task(task_id: int): - """Get a single task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - return dict(row) - -@app.post("/api/tasks", response_model=Task) -async def create_task(task: TaskCreate): - """Create a new task.""" - now = datetime.now().isoformat() - with get_db() as conn: - cursor = conn.execute( - """INSERT INTO tasks (title, description, status, priority, agent, due_date, created_at, updated_at, board, source_file, source_ref) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", - (task.title, task.description, task.status, task.priority, task.agent, task.due_date, now, now, task.board, task.source_file, task.source_ref) - ) - conn.commit() - task_id = cursor.lastrowid - log_activity(task_id, "created", task.agent, f"Created: {task.title}") - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast to all clients - await manager.broadcast({"type": "task_created", "task": result}) - return result - -@app.patch("/api/tasks/{task_id}", response_model=Task) -async def update_task(task_id: int, updates: TaskUpdate): - """Update a task.""" - with get_db() as conn: - # Get current task - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - current = dict(row) - changes = [] - - # Build update - update_fields = [] - params = [] - - for field in ["title", "description", "status", "priority", "agent", "due_date", "source_file", "source_ref"]: - new_value = getattr(updates, field) - if new_value is not None and new_value != current[field]: - update_fields.append(f"{field} = ?") - params.append(new_value) - changes.append(f"{field}: {current[field]} → {new_value}") - - if update_fields: - update_fields.append("updated_at = ?") - params.append(datetime.now().isoformat()) - params.append(task_id) - - conn.execute(f"UPDATE tasks SET {', '.join(update_fields)} WHERE id = ?", params) - conn.commit() - - log_activity(task_id, "updated", updates.agent or current["agent"], "; ".join(changes)) - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast to all clients - await manager.broadcast({"type": "task_updated", "task": result}) - return result - -@app.delete("/api/tasks/{task_id}") -async def delete_task(task_id: int): - """Delete a task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - conn.execute("DELETE FROM tasks WHERE id = ?", (task_id,)) - conn.commit() - log_activity(task_id, "deleted", None, f"Deleted: {row['title']}") - - # Broadcast to all clients - await manager.broadcast({"type": "task_deleted", "task_id": task_id}) - return {"status": 
"deleted", "id": task_id} - -# ============================================================================= -# AGENT ENDPOINTS -# ============================================================================= - -@app.get("/api/agents/{agent}/tasks") -def get_agent_tasks(agent: str): - """Get all tasks assigned to an agent.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM tasks WHERE agent = ? AND status NOT IN ('Done', 'Blocked') ORDER BY priority, created_at", - (agent,) - ).fetchall() - return [dict(row) for row in rows] - -# ============================================================================= -# WORK STATUS (AI Activity Indicator) -# ============================================================================= - -@app.post("/api/tasks/{task_id}/start-work") -async def start_work(task_id: int, agent: str): - """Mark that an agent is actively working on a task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - conn.execute( - "UPDATE tasks SET working_agent = ?, updated_at = ? WHERE id = ?", - (agent, datetime.now().isoformat(), task_id) - ) - conn.commit() - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - await manager.broadcast({"type": "work_started", "task_id": task_id, "agent": agent}) - return {"status": "working", "task_id": task_id, "agent": agent} - -@app.post("/api/tasks/{task_id}/stop-work") -async def stop_work(task_id: int, agent: str = None): - """Mark that an agent has stopped working on a task.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - conn.execute( - "UPDATE tasks SET working_agent = NULL, updated_at = ? WHERE id = ?", - (datetime.now().isoformat(), task_id) - ) - conn.commit() - - await manager.broadcast({"type": "work_stopped", "task_id": task_id}) - return {"status": "stopped", "task_id": task_id} - -class MoveRequest(BaseModel): - status: str - agent: str = None - reason: str = None # Required for Review/Blocked transitions - -@app.post("/api/tasks/{task_id}/move") -async def move_task(task_id: int, status: str = None, agent: str = None, reason: str = None): - """Quick move task to a new status with workflow rules.""" - now = datetime.now().isoformat() - - with get_db() as conn: - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - task = dict(row) - old_status = task["status"] - - # RULE: Only User (human) can move to Done - if status == "Done" and agent != "User": - raise HTTPException(status_code=403, detail="Only User can move tasks to Done") - - # Update status - conn.execute( - "UPDATE tasks SET status = ?, updated_at = ? 
WHERE id = ?", - (status, now, task_id) - ) - conn.commit() - log_activity(task_id, "moved", agent, f"Moved to {status}") - - # AUTO-CREATE ACTION ITEMS based on transition - action_item = None - - # Moving to Review → create completion action item - if status == "Review" and old_status != "Review": - content = reason or f"Ready for review: {task['title']}" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or task["agent"], content, "completion", now) - ) - conn.commit() - action_item = { - "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], - "content": content, "item_type": "completion", "resolved": 0, "created_at": now - } - - # Moving to Blocked → create blocker action item - if status == "Blocked" and old_status != "Blocked": - content = reason or f"Blocked: {task['title']} - reason not specified" - cursor = conn.execute( - "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", - (task_id, agent or task["agent"], content, "blocker", now) - ) - conn.commit() - action_item = { - "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], - "content": content, "item_type": "blocker", "resolved": 0, "created_at": now - } - - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - result = dict(row) - - # Broadcast updates - await manager.broadcast({"type": "task_updated", "task": result}) - if action_item: - await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": action_item}) - - # AUTO-SPAWN: When moving to In Progress, spawn the assigned agent's session - spawned = False - if status == "In Progress" and old_status != "In Progress": - assigned_agent = result.get("agent", "Unassigned") - if assigned_agent in AGENT_TO_MOLTBOT_ID and assigned_agent != "User": - await spawn_agent_session( - task_id=task_id, - task_title=result["title"], - task_description=result.get("description", ""), - agent_name=assigned_agent - ) - spawned = True - - # CLEANUP: When moving to Done, clear the agent session AND working indicator - session_cleared = False - if status == "Done": - # Always clear working_agent when task is Done - with get_db() as conn: - conn.execute( - "UPDATE tasks SET working_agent = NULL WHERE id = ?", - (task_id,) - ) - conn.commit() - await manager.broadcast({"type": "work_stopped", "task_id": task_id}) - - session_key = get_task_session(task_id) - if session_key: - # Notify the agent that the task is complete - await send_to_agent_session(session_key, - f"✅ **Task #{task_id} marked as Done by User.**\n\nYour work is complete. This session will now end. 
Thank you!") - # Clear the session from the database - set_task_session(task_id, None) - session_cleared = True - print(f"🧹 Cleared agent session for task #{task_id}") - - return {"status": "moved", "new_status": status, "action_item_created": action_item is not None, "agent_spawned": spawned, "session_cleared": session_cleared} - -# ============================================================================= -# COMMENTS -# ============================================================================= - -class CommentCreate(BaseModel): - agent: str - content: str - - @field_validator('content') - @classmethod - def validate_content_size(cls, v): - # Limit content to 10MB (base64 images can be large) - if len(v) > MAX_ATTACHMENT_SIZE_BYTES: - raise ValueError(f'Content exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') - return v - - @field_validator('agent') - @classmethod - def validate_agent(cls, v): - if len(v) > 100: - raise ValueError('Agent name too long') - return v - -@app.get("/api/tasks/{task_id}/comments") -def get_comments(task_id: int): - """Get comments for a task.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM comments WHERE task_id = ? ORDER BY created_at ASC", - (task_id,) - ).fetchall() - return [dict(row) for row in rows] - -@app.post("/api/tasks/{task_id}/comments") -async def add_comment(task_id: int, comment: CommentCreate): - """Add a comment to a task.""" - now = datetime.now().isoformat() - task_title = "" - task_status = "" - agent_session = None - - with get_db() as conn: - # Verify task exists - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - task_title = row["title"] - task_status = row["status"] - agent_session = row["agent_session_key"] if "agent_session_key" in row.keys() else None - - cursor = conn.execute( - "INSERT INTO comments (task_id, agent, content, created_at) VALUES (?, ?, ?, ?)", - (task_id, comment.agent, comment.content, now) - ) - conn.commit() - - result = { - "id": cursor.lastrowid, - "task_id": task_id, - "agent": comment.agent, - "content": comment.content, - "created_at": now - } - - # Broadcast to all clients - await manager.broadcast({"type": "comment_added", "task_id": task_id, "comment": result}) - - # Check for @mentions in the comment and spawn mentioned agents - mentions = MENTION_PATTERN.findall(comment.content) - if mentions: - # Get task description and previous context for the spawned agent - task_description = "" - previous_context = "" - with get_db() as conn: - task_row = conn.execute("SELECT description FROM tasks WHERE id = ?", (task_id,)).fetchone() - task_description = task_row["description"] if task_row else "" - - # Get last few comments for context (excluding the one that just triggered this) - comment_rows = conn.execute( - "SELECT agent, content FROM comments WHERE task_id = ? AND id != ? 
ORDER BY created_at DESC LIMIT 5", - (task_id, result["id"]) - ).fetchall() - if comment_rows: - previous_context = "\n".join([f"**{r['agent']}:** {r['content'][:500]}" for r in reversed(comment_rows)]) - - for mentioned_agent in set(mentions): # dedupe mentions - # Normalize case to match AGENT_TO_CLAWDBOT_ID keys - matched_agent = None - for agent_name in AGENT_TO_CLAWDBOT_ID.keys(): - if agent_name.lower() == mentioned_agent.lower(): - matched_agent = agent_name - break - - if matched_agent and matched_agent != comment.agent: # Don't spawn self - agent_id = AGENT_TO_CLAWDBOT_ID.get(matched_agent) - if agent_id: # All agents including main can be spawned now - # Spawn the mentioned agent to respond - await spawn_mentioned_agent( - task_id=task_id, - task_title=task_title, - task_description=task_description, - mentioned_agent=matched_agent, - mentioner=comment.agent, - comment_content=comment.content, - previous_context=previous_context - ) - print(f"📢 Spawned {matched_agent} for mention in task #{task_id}") - - # If this is from User and task is active, try to reach the agent - if comment.agent == "User" and task_status in ["In Progress", "Review"]: - # Get the assigned agent for this task - with get_db() as conn: - row = conn.execute("SELECT agent FROM tasks WHERE id = ?", (task_id,)).fetchone() - assigned_agent = row["agent"] if row else None - - if assigned_agent and assigned_agent in AGENT_TO_MOLTBOT_ID and assigned_agent != "User": - # Get previous conversation context (last few comments) - previous_comments = [] - with get_db() as conn: - rows = conn.execute( - "SELECT agent, content FROM comments WHERE task_id = ? ORDER BY created_at DESC LIMIT 5", - (task_id,) - ).fetchall() - previous_comments = [{"agent": r["agent"], "content": r["content"][:500]} for r in reversed(rows)] - - context = "\n".join([f"**{c['agent']}:** {c['content']}" for c in previous_comments[:-1]]) # Exclude current comment - - # Try to send to existing session first - sent = False - if agent_session: - message = f"""💬 **User replied on Task #{task_id}:** - -{comment.content} - ---- -Respond by posting a comment to the task.""" - sent = await send_to_agent_session(agent_session, message) - - if not sent: - # Session ended - spawn a new one with context - print(f"🔄 Session ended, spawning follow-up for task #{task_id}") - await spawn_followup_session( - task_id=task_id, - task_title=task_title, - agent_name=assigned_agent, - previous_context=context, - new_message=comment.content - ) - elif comment.agent not in ["System", "User"] + list(AGENT_TO_MOLTBOT_ID.keys()): - # Notify MOLTBOT for other comments - await notify_MOLTBOT(task_id, task_title, comment.agent, comment.content) - - return result - -# ============================================================================= -# ACTION ITEMS (Questions, Notifications, Blockers) -# ============================================================================= - -class ActionItemCreate(BaseModel): - agent: str - content: str - item_type: str = "question" # question, completion, blocker - comment_id: Optional[int] = None - -@app.get("/api/tasks/{task_id}/action-items") -def get_action_items(task_id: int, resolved: bool = False, archived: bool = False): - """Get action items for a task. By default excludes archived items.""" - with get_db() as conn: - if archived: - # Only return archived items - rows = conn.execute( - "SELECT * FROM action_items WHERE task_id = ? 
AND archived = 1 ORDER BY created_at ASC", - (task_id,) - ).fetchall() - else: - # Return non-archived items filtered by resolved status - rows = conn.execute( - "SELECT * FROM action_items WHERE task_id = ? AND resolved = ? AND archived = 0 ORDER BY created_at ASC", - (task_id, 1 if resolved else 0) - ).fetchall() - return [dict(row) for row in rows] - -@app.post("/api/tasks/{task_id}/action-items") -async def add_action_item(task_id: int, item: ActionItemCreate): - """Add an action item to a task.""" - now = datetime.now().isoformat() - with get_db() as conn: - # Verify task exists - row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Task not found") - - cursor = conn.execute( - "INSERT INTO action_items (task_id, comment_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?, ?)", - (task_id, item.comment_id, item.agent, item.content, item.item_type, now) - ) - conn.commit() - - result = { - "id": cursor.lastrowid, - "task_id": task_id, - "comment_id": item.comment_id, - "agent": item.agent, - "content": item.content, - "item_type": item.item_type, - "resolved": 0, - "created_at": now, - "resolved_at": None - } - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": result}) - - return result - -@app.post("/api/action-items/{item_id}/resolve") -async def resolve_action_item(item_id: int): - """Resolve an action item.""" - now = datetime.now().isoformat() - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET resolved = 1, resolved_at = ? 
WHERE id = ?", - (now, item_id) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_resolved", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - - -@app.post("/api/action-items/{item_id}/archive") -async def archive_action_item(item_id: int): - """Archive a resolved action item to hide it from main view.""" - now = datetime.now().isoformat() - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET archived = 1 WHERE id = ?", - (item_id,) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_archived", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - - -@app.post("/api/action-items/{item_id}/unarchive") -async def unarchive_action_item(item_id: int): - """Unarchive an action item to show it in main view again.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - conn.execute( - "UPDATE action_items SET archived = 0 WHERE id = ?", - (item_id,) - ) - conn.commit() - - task_id = row["task_id"] - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_unarchived", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - -@app.delete("/api/action-items/{item_id}") -async def delete_action_item(item_id: int): - """Delete an action item.""" - with get_db() as conn: - row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() - if not row: - raise HTTPException(status_code=404, detail="Action item not found") - - task_id = row["task_id"] - conn.execute("DELETE FROM action_items WHERE id = ?", (item_id,)) - conn.commit() - - # Broadcast to all clients - await manager.broadcast({"type": "action_item_deleted", "task_id": task_id, "item_id": item_id}) - - return {"success": True, "item_id": item_id} - -# ============================================================================= -# ACTIVITY LOG -# ============================================================================= - -@app.get("/api/activity") -def get_activity(limit: int = 50): - """Get recent activity.""" - with get_db() as conn: - rows = conn.execute( - "SELECT * FROM activity_log ORDER BY timestamp DESC LIMIT ?", - (limit,) - ).fetchall() - return [dict(row) for row in rows] - - -# ============================================================================= -# JARVIS DIRECT CHAT (Command Bar Channel) -# ============================================================================= - -class JarvisMessage(BaseModel): - message: str - session: str = "main" # Which session to send to - attachments: Optional[List[dict]] = None # [{type: "image/png", data: "base64...", filename: "..."}] - - @field_validator('message') - @classmethod - def validate_message_size(cls, v): - if len(v) > MAX_ATTACHMENT_SIZE_BYTES: - raise ValueError(f'Message exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') - return v - -# Chat history now persisted in SQLite (no more in-memory loss on refresh) - -# ============================================================================= -# CLAWDBOT SESSIONS API -# 
============================================================================= - -@app.get("/api/sessions") -async def list_sessions(): - """Proxy to Clawdbot sessions_list to get active sessions.""" - if not MOLTBOT_ENABLED: - return {"sessions": [], "error": "Clawdbot integration not enabled"} - - try: - async with httpx.AsyncClient(timeout=10.0) as client: - payload = { - "tool": "sessions_list", - "args": { - "limit": 20, - "messageLimit": 0 - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - # Response is in result.content[0].text as JSON string - inner_result = result.get("result", {}) - content = inner_result.get("content", []) - if content and len(content) > 0: - text_content = content[0].get("text", "{}") - sessions_data = json.loads(text_content) - else: - sessions_data = inner_result - sessions = sessions_data.get("sessions", []) - - # Format for frontend - formatted = [] - for s in sessions: - key = s.get("key", "") - session_label = s.get("label", "") # Label from Clawdbot - display = s.get("displayName", key) - - # Use Clawdbot's label - if key == "main" or key == "agent:main:main": - label = "🛡️ Jarvis (Main)" - elif session_label: - # Use Clawdbot's label if available - label = f"🤖 {session_label}" - elif "subagent" in key: - # Subagent without label - use short ID - short_id = key.split(":")[-1][:8] if ":" in key else key[:8] - label = f"🤖 Session {short_id}" - elif key.startswith("agent:"): - parts = key.split(":") - agent_name = parts[1] if len(parts) > 1 else key - label = f"🤖 {agent_name.title()}" - else: - label = display - - formatted.append({ - "key": key, - "label": label, - "channel": s.get("channel", ""), - "model": s.get("model", ""), - "updatedAt": s.get("updatedAt", 0) - }) - - # Filter out deleted sessions and cleanup stale entries - clawdbot_keys = set(s["key"] for s in formatted) - - with get_db() as conn: - deleted_rows = conn.execute("SELECT session_key FROM deleted_sessions").fetchall() - deleted_keys = set(row["session_key"] for row in deleted_rows) - - # Cleanup: remove deleted_sessions entries that are no longer in Clawdbot - # (Clawdbot has already removed them, so we don't need to track them anymore) - orphaned_keys = deleted_keys - clawdbot_keys - if orphaned_keys: - placeholders = ",".join("?" * len(orphaned_keys)) - conn.execute(f"DELETE FROM deleted_sessions WHERE session_key IN ({placeholders})", - list(orphaned_keys)) - conn.commit() - - formatted = [s for s in formatted if s["key"] not in deleted_keys] - - # Sort: main first, then by updatedAt - formatted.sort(key=lambda x: (0 if "main" in x["key"].lower() else 1, -x.get("updatedAt", 0))) - return {"sessions": formatted} - - return {"sessions": [], "error": f"Failed to fetch sessions: {response.status_code}"} - except Exception as e: - print(f"Error fetching sessions: {e}") - return {"sessions": [], "error": str(e)} - - -class SessionCreate(BaseModel): - label: str = None - agentId: str = "main" - task: str = "New session started from Task Board. Awaiting instructions." 
- - -@app.post("/api/sessions/create") -async def create_session(req: SessionCreate): - """Create a new Clawdbot session via sessions_spawn.""" - if not MOLTBOT_ENABLED: - return {"success": False, "error": "Clawdbot integration not enabled"} - - try: - async with httpx.AsyncClient(timeout=30.0) as client: - payload = { - "tool": "sessions_spawn", - "args": { - "agentId": req.agentId, - "task": req.task, - "label": req.label or f"taskboard-{datetime.now().strftime('%H%M%S')}", - "cleanup": "keep" - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - return {"success": True, "result": result.get("result", {})} - - return {"success": False, "error": f"Failed: {response.status_code}"} - except Exception as e: - print(f"Error creating session: {e}") - return {"success": False, "error": str(e)} - - -@app.post("/api/sessions/{session_key}/stop") -async def stop_session(session_key: str): - """Stop/abort a running session.""" - if not MOLTBOT_ENABLED: - return {"success": False, "error": "Clawdbot integration not enabled"} - - try: - # Use the gateway's abort mechanism - async with httpx.AsyncClient(timeout=10.0) as client: - # Send an abort signal via sessions_send with a special abort message - payload = { - "tool": "sessions_send", - "args": { - "sessionKey": session_key, - "message": "SYSTEM: ABORT - User requested stop from Task Board" - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - - # First try to send abort message - await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - # Also try the direct abort endpoint if available - try: - abort_response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/api/sessions/{session_key}/abort", - headers=headers - ) - if abort_response.status_code == 200: - return {"success": True, "message": f"Stopped session: {session_key}"} - except: - pass - - return {"success": True, "message": f"Stop signal sent to: {session_key}"} - except Exception as e: - print(f"Error stopping session: {e}") - return {"success": False, "error": str(e)} - - -@app.post("/api/sessions/stop-all") -async def stop_all_sessions(): - """Emergency stop all non-main sessions.""" - if not MOLTBOT_ENABLED: - return {"success": False, "error": "Clawdbot integration not enabled"} - - stopped = [] - errors = [] - - try: - # First get all sessions - async with httpx.AsyncClient(timeout=10.0) as client: - payload = { - "tool": "sessions_list", - "args": {"limit": 50, "messageLimit": 0} - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - if result.get("ok"): - inner_result = result.get("result", {}) - content = inner_result.get("content", []) - if content and len(content) > 0: - text_content = content[0].get("text", "{}") - sessions_data = json.loads(text_content) - else: - sessions_data = inner_result - - sessions = sessions_data.get("sessions", []) - - # Stop each non-main session - for s in sessions: - key = s.get("key", "") - if key and "main" not in key.lower(): - try: - stop_result = await stop_session(key) - if 
stop_result.get("success"): - stopped.append(key) - else: - errors.append(key) - except: - errors.append(key) - - return { - "success": True, - "stopped": stopped, - "errors": errors, - "message": f"Stopped {len(stopped)} sessions" - } - except Exception as e: - print(f"Error stopping all sessions: {e}") - return {"success": False, "error": str(e)} - - -@app.delete("/api/sessions/{session_key}") -async def delete_session(session_key: str): - """Close/delete a session - removes from Clawdbot's session store.""" - if not MOLTBOT_ENABLED: - return {"success": False, "error": "Clawdbot integration not enabled"} - - # Send stop signal first - await stop_session(session_key) - - now = datetime.now().isoformat() - - # Clear taskboard's local chat history - with get_db() as conn: - conn.execute("DELETE FROM chat_messages WHERE session_key = ?", (session_key,)) - conn.execute( - "INSERT OR REPLACE INTO deleted_sessions (session_key, deleted_at) VALUES (?, ?)", - (session_key, now) - ) - conn.commit() - - # Delete from Clawdbot's session store - clawdbot_deleted = False - try: - # Parse session key to get agent id (format: agent::) - parts = session_key.split(":") - if len(parts) >= 2 and parts[0] == "agent": - agent_id = parts[1] # e.g., "main" - - # Path to Clawdbot session store (use env var if in Docker, fallback to home dir) - import os - clawdbot_home = os.environ.get("CLAWDBOT_DATA_PATH", os.path.expanduser("~/.clawdbot")) - sessions_file = os.path.join(clawdbot_home, "agents", agent_id, "sessions", "sessions.json") - - if os.path.exists(sessions_file): - import json - with open(sessions_file, 'r', encoding='utf-8') as f: - sessions_data = json.load(f) - - # Check if session exists and get its sessionId for transcript deletion - session_id = None - if session_key in sessions_data: - session_id = sessions_data[session_key].get("sessionId") - del sessions_data[session_key] - - # Write back - with open(sessions_file, 'w', encoding='utf-8') as f: - json.dump(sessions_data, f, indent=2) - - clawdbot_deleted = True - print(f"Deleted session {session_key} from Clawdbot store") - - # Also delete transcript file if it exists - if session_id: - transcript_file = os.path.join(clawdbot_home, "agents", agent_id, "sessions", f"{session_id}.jsonl") - if os.path.exists(transcript_file): - os.remove(transcript_file) - print(f"Deleted transcript {transcript_file}") - except Exception as e: - print(f"Warning: Could not delete from Clawdbot store: {e}") - - # Broadcast session deletion to all clients for real-time UI update - await manager.broadcast({ - "type": "session_deleted", - "session_key": session_key - }) - - return { - "success": True, - "message": f"Deleted session: {session_key}", - "clawdbot_deleted": clawdbot_deleted - } - - -@app.get("/api/jarvis/history") -def get_chat_history(limit: int = 100, session: str = "main"): - """Get command bar chat history from database, filtered by session.""" - with get_db() as conn: - rows = conn.execute( - "SELECT id, session_key, role, content, attachments, created_at FROM chat_messages WHERE session_key = ? 
ORDER BY id DESC LIMIT ?", - (session, limit) - ).fetchall() - # Return in chronological order - messages = [] - for row in reversed(rows): - msg = { - "id": row["id"], - "session_key": row["session_key"], - "role": row["role"], - "content": row["content"], - "timestamp": row["created_at"] - } - if row["attachments"]: - msg["attachments"] = json.loads(row["attachments"]) - messages.append(msg) - return {"history": messages, "session": session} - -@app.post("/api/jarvis/chat") -async def chat_with_jarvis(msg: JarvisMessage): - """Send a message to Jarvis via sessions_send (synchronous, waits for response).""" - if not MOLTBOT_ENABLED: - return {"sent": False, "error": "Clawdbot integration not enabled."} - - now = datetime.now().isoformat() - - # Build the message content with taskboard context - message_content = f"System: [TASKBOARD_CHAT] User says: {msg.message}\n\nRespond naturally." - - # Include attachment data in the message for the agent to process - if msg.attachments: - for att in msg.attachments: - att_type = att.get("type", "") - att_data = att.get("data", "") - att_filename = att.get("filename", "file") - - if att_type.startswith("image/") and att_data: - # Embed full base64 image data so agent can use image tool - message_content += f"\n\n[IMAGE:{att_data}]" - elif att_data: - # For text files, try to extract and embed the content - if att_data.startswith("data:") and ";base64," in att_data: - try: - import base64 - # Extract base64 part after the comma - b64_content = att_data.split(",", 1)[1] - decoded = base64.b64decode(b64_content).decode("utf-8", errors="replace") - message_content += f"\n\n**📎 Attached file: {att_filename}**\n```\n{decoded}\n```" - except Exception as e: - message_content += f"\n\n[Attached File: {att_filename} (decode error: {e})]" - else: - message_content += f"\n\n[Attached File: {att_filename}]" - - # Normalize session key - session_key = msg.session or "main" - - # Store user message in database - attachments_json = json.dumps(msg.attachments) if msg.attachments else None - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "user", msg.message, attachments_json, now) - ) - conn.commit() - user_msg_id = cursor.lastrowid - - user_msg = { - "id": user_msg_id, - "session_key": session_key, - "role": "user", - "content": msg.message, - "timestamp": now, - "attachments": msg.attachments - } - - # Broadcast user message to all clients (so other tabs see it) - await manager.broadcast({ - "type": "command_bar_message", - "message": user_msg - }) - - try: - # Use sessions_send via tools/invoke - this is synchronous and waits for response - async with httpx.AsyncClient(timeout=120.0) as client: - payload = { - "tool": "sessions_send", - "args": { - "message": message_content, - "sessionKey": session_key, # Use selected session - "timeoutSeconds": 90 - } - } - headers = { - "Authorization": f"Bearer {MOLTBOT_TOKEN}", - "Content-Type": "application/json" - } - - response = await client.post( - f"{MOLTBOT_GATEWAY_URL}/tools/invoke", - json=payload, - headers=headers - ) - - if response.status_code == 200: - result = response.json() - - # /tools/invoke returns { ok: true, result: { content, details: { reply, ... 
} } } - inner = result.get("result", {}) - - if isinstance(inner, dict): - # Response is in inner.details.reply - details = inner.get("details", {}) - assistant_reply = details.get("reply") or inner.get("reply") or inner.get("response") - else: - assistant_reply = str(inner) if inner else None - - # Ensure it's a string - if assistant_reply and not isinstance(assistant_reply, str): - import json as json_module - assistant_reply = json_module.dumps(assistant_reply) if isinstance(assistant_reply, (dict, list)) else str(assistant_reply) - - if assistant_reply: - # Store the response in database - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "assistant", assistant_reply, None, now) - ) - conn.commit() - assistant_msg_id = cursor.lastrowid - - jarvis_msg = { - "id": assistant_msg_id, - "session_key": session_key, - "role": "assistant", - "content": assistant_reply, - "timestamp": datetime.now().isoformat() - } - # Return response directly - frontend adds to history from HTTP response - return {"sent": True, "response": assistant_reply, "session": session_key} - - return {"sent": True, "response": "No response received"} - else: - error_text = response.text[:200] if response.text else f"HTTP {response.status_code}" - return {"sent": False, "error": error_text} - - except Exception as e: - print(f"Error sending to Jarvis: {e}") - return {"sent": False, "error": str(e)} - -class JarvisResponse(BaseModel): - response: str - session: str = "main" # Which session this response is for - - @field_validator('response') - @classmethod - def validate_response_size(cls, v): - if len(v) > 1024 * 1024: # 1MB limit for responses - raise ValueError('Response too large') - return v - -@app.post("/api/jarvis/respond") -async def jarvis_respond(msg: JarvisResponse, _: bool = Depends(verify_api_key)): - """Endpoint for Jarvis to push responses back to the command bar. Requires API key.""" - now = datetime.now().isoformat() - session_key = msg.session or "main" - - # Store Jarvis response in database - with get_db() as conn: - cursor = conn.execute( - "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", - (session_key, "assistant", msg.response, None, now) - ) - conn.commit() - msg_id = cursor.lastrowid - - jarvis_msg = { - "id": msg_id, - "session_key": session_key, - "role": "assistant", - "content": msg.response, - "timestamp": now - } - - # Broadcast to all connected clients - await manager.broadcast({ - "type": "command_bar_message", - "message": jarvis_msg - }) - return {"delivered": True} - -# Legacy endpoint for backwards compatibility -@app.post("/api/molt/chat") -async def chat_with_molt_legacy(msg: JarvisMessage): - """Legacy endpoint - redirects to /api/jarvis/chat.""" - return await chat_with_jarvis(msg) - -@app.post("/api/molt/respond") -async def jarvis_respond_legacy(msg: JarvisResponse, _: bool = Depends(verify_api_key)): - """Legacy endpoint - redirects to /api/jarvis/respond.""" - return await jarvis_respond(msg, _) - - -if __name__ == "__main__": - import uvicorn - uvicorn.run(app, host="0.0.0.0", port=8080) diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/config.py b/app/config.py new file mode 100644 index 0000000..577fbee --- /dev/null +++ b/app/config.py @@ -0,0 +1,225 @@ +""" +Configuration: environment variables, constants, agent metadata. 
+""" + +import os +import re +from pathlib import Path +from typing import List, Optional + +# ============================================================================= +# PATHS +# ============================================================================= +DATA_DIR = Path(__file__).parent.parent / "data" +DATA_DIR.mkdir(exist_ok=True) +DB_PATH = DATA_DIR / "tasks.db" +STATIC_PATH = Path(__file__).parent.parent / "static" +ATTACHMENTS_PATH = DATA_DIR / "attachments" +ATTACHMENTS_PATH.mkdir(exist_ok=True) + +# ============================================================================= +# BRANDING +# ============================================================================= +MAIN_AGENT_NAME = os.getenv("MAIN_AGENT_NAME", "Jarvis") +MAIN_AGENT_EMOJI = os.getenv("MAIN_AGENT_EMOJI", "\U0001F6E1") +HUMAN_NAME = os.getenv("HUMAN_NAME", "User") +HUMAN_SUPERVISOR_LABEL = os.getenv("HUMAN_SUPERVISOR_LABEL", "User") +BOARD_TITLE = os.getenv("BOARD_TITLE", "Task Board") + +# ============================================================================= +# VALID STATUSES & PRIORITIES (used for validation) +# ============================================================================= +VALID_STATUSES = ["Backlog", "Todo", "In Progress", "Review", "Done", "Blocked"] +VALID_PRIORITIES = ["Critical", "High", "Medium", "Low"] + +# Legacy list used by /api/config (kept as-is from original) +STATUSES = ["Backlog", "Todo", "In Progress", "Review", "Done", "Blocked"] +PRIORITIES = ["Critical", "High", "Medium", "Low"] + +# ============================================================================= +# AGENT DEFAULTS +# ============================================================================= +AGENT_DEFAULTS = { + "main": {"icon": "\U0001f916", "color": "#6366f1", "description": "Main coordinator, handles command bar chat"}, + "architect": {"icon": "\U0001f3db\ufe0f", "color": "#8b5cf6", "description": "System design, patterns, scalability"}, + "security-auditor": {"icon": "\U0001f512", "color": "#ef4444", "description": "Compliance, vulnerability detection"}, + "code-reviewer": {"icon": "\U0001f4cb", "color": "#14b8a6", "description": "Code quality, best practices"}, + "ux-manager": {"icon": "\U0001f3a8", "color": "#ec4899", "description": "User experience, flows, accessibility"}, +} + +_AGENT_COLORS = ["#f59e0b", "#06b6d4", "#84cc16", "#a855f7", "#f43f5e", "#64748b"] + +# Mutable agent state (populated at startup) +AGENTS: List[str] = [] +AGENT_TO_OPENCLAW_ID: dict = {} +AGENT_META: dict = {} +MENTIONABLE_AGENTS: List[str] = [] +MENTION_PATTERN = None + + +def _rebuild_mention_pattern(): + global MENTIONABLE_AGENTS, MENTION_PATTERN + MENTIONABLE_AGENTS = list(AGENT_TO_OPENCLAW_ID.keys()) + if MENTIONABLE_AGENTS: + MENTION_PATTERN = re.compile(r'@(' + '|'.join(re.escape(a) for a in MENTIONABLE_AGENTS) + r')', re.IGNORECASE) + else: + MENTION_PATTERN = re.compile(r'(?!)') # never matches + + +def _build_agents_from_env(): + """Build agent lists from AGENTS env var. 
Format: 'agent_id:Display Name,agent_id2:Name2'""" + global AGENTS, AGENT_TO_OPENCLAW_ID, AGENT_META, MENTIONABLE_AGENTS, MENTION_PATTERN + AGENT_TO_OPENCLAW_ID = {} + AGENT_META = {} + color_idx = 0 + + for entry in AGENTS_ENV.split(","): + entry = entry.strip() + if not entry: + continue + if ":" in entry: + agent_id, agent_name = entry.split(":", 1) + agent_id = agent_id.strip() + agent_name = agent_name.strip() + else: + agent_id = entry.strip() + agent_name = agent_id.replace("-", " ").title() + + defaults = AGENT_DEFAULTS.get(agent_id, {}) + AGENT_TO_OPENCLAW_ID[agent_name] = agent_id + if agent_id == "main": + icon = MAIN_AGENT_EMOJI + else: + icon = defaults.get("icon", "🔧") + color = defaults.get("color", _AGENT_COLORS[color_idx % len(_AGENT_COLORS)]) + if agent_id not in AGENT_DEFAULTS: + color_idx += 1 + AGENT_META[agent_name] = {"id": agent_id, "icon": icon, "color": color, "description": defaults.get("description", "")} + + AGENTS = list(AGENT_TO_OPENCLAW_ID.keys()) + ["User", "Unassigned"] + AGENT_META["User"] = {"id": "user", "icon": "👤", "color": "#22c55e", "description": "Human supervisor"} + AGENT_META["Unassigned"] = {"id": "unassigned", "icon": "○", "color": "#64748b", "description": "Not yet assigned"} + _rebuild_mention_pattern() + print(f"⚙️ AGENTS (from ENV): {AGENTS}") + + +def _build_fallback_agents(): + """Build agent lists from hardcoded defaults (when OpenClaw is unreachable).""" + global AGENTS, AGENT_TO_OPENCLAW_ID, AGENT_META, MENTIONABLE_AGENTS, MENTION_PATTERN + AGENT_TO_OPENCLAW_ID = { + MAIN_AGENT_NAME: "main", + "Architect": "architect", + "Security Auditor": "security-auditor", + "Code Reviewer": "code-reviewer", + "UX Manager": "ux-manager", + } + AGENTS = list(AGENT_TO_OPENCLAW_ID.keys()) + ["User", "Unassigned"] + AGENT_META = {} + for name, agent_id in AGENT_TO_OPENCLAW_ID.items(): + defaults = AGENT_DEFAULTS.get(agent_id, {}) + if agent_id == "main": + AGENT_META[name] = {"id": agent_id, "icon": MAIN_AGENT_EMOJI, "color": defaults.get("color", "#6366f1"), "description": defaults.get("description", "")} + else: + AGENT_META[name] = {"id": agent_id, "icon": defaults.get("icon", "\u25cb"), "color": defaults.get("color", "#64748b"), "description": defaults.get("description", "")} + AGENT_META["User"] = {"id": "user", "icon": "\U0001f464", "color": "#22c55e", "description": "Human supervisor"} + AGENT_META["Unassigned"] = {"id": "unassigned", "icon": "\u25cb", "color": "#64748b", "description": "Not yet assigned"} + _rebuild_mention_pattern() + print(f"\u2699\ufe0f AGENTS (fallback): {AGENTS}") + + +def _populate_agents_from_openclaw(agents_data: list): + """Build agent lists from OpenClaw API response.""" + global AGENTS, AGENT_TO_OPENCLAW_ID, AGENT_META, MENTIONABLE_AGENTS, MENTION_PATTERN + AGENT_TO_OPENCLAW_ID = {} + AGENT_META = {} + color_idx = 0 + + for agent in agents_data: + agent_id = agent["id"] + agent_name = agent["name"] + defaults = AGENT_DEFAULTS.get(agent_id, {}) + + if agent_id == "main": + agent_name = MAIN_AGENT_NAME if MAIN_AGENT_NAME != "Jarvis" else agent["name"] + icon = MAIN_AGENT_EMOJI + else: + icon = defaults.get("icon", "\U0001f527") + + color = defaults.get("color") + if not color: + color = _AGENT_COLORS[color_idx % len(_AGENT_COLORS)] + color_idx += 1 + + description = defaults.get("description", f"{agent_name} agent") + + AGENT_TO_OPENCLAW_ID[agent_name] = agent_id + AGENT_META[agent_name] = {"id": agent_id, "icon": icon, "color": color, "description": description} + + AGENTS = list(AGENT_TO_OPENCLAW_ID.keys()) + 
["User", "Unassigned"] + AGENT_META["User"] = {"id": "user", "icon": "\U0001f464", "color": "#22c55e", "description": "Human supervisor"} + AGENT_META["Unassigned"] = {"id": "unassigned", "icon": "\u25cb", "color": "#64748b", "description": "Not yet assigned"} + _rebuild_mention_pattern() + print(f"\u2705 AGENTS (from OpenClaw): {AGENTS}") + + +# Initialize with fallback; replaced at startup if OpenClaw is reachable +_build_fallback_agents() + +# ============================================================================= +# SECURITY / TOKENS +# ============================================================================= +import secrets + +OPENCLAW_GATEWAY_URL = os.getenv("OPENCLAW_GATEWAY_URL", "http://host.docker.internal:18789") +OPENCLAW_TOKEN = os.getenv("OPENCLAW_TOKEN", "") +TASKBOARD_API_KEY = os.getenv("TASKBOARD_API_KEY", "") +TASKBOARD_BASE_URL = os.getenv("TASKBOARD_BASE_URL", "http://localhost:8080") +OPENCLAW_ENABLED = bool(OPENCLAW_TOKEN) +AGENT_AUTO_DETECT = os.getenv("AGENT_AUTO_DETECT", "true").lower() in ("true", "1", "yes") +# Auto-stop agent sessions when a task is moved to Done +AUTO_STOP_ON_DONE = os.getenv("AUTO_STOP_ON_DONE", "true").lower() in ("true", "1", "yes") +# Comma-separated list of agent_id:Display Name pairs, e.g. "main:Jarvis,architect:Architect,code-reviewer:Code Reviewer" +AGENTS_ENV = os.getenv("AGENTS", "") + +# Project configuration +PROJECT_NAME = os.getenv("PROJECT_NAME", "My Project") +COMPANY_NAME = os.getenv("COMPANY_NAME", "Acme Corp") +COMPANY_CONTEXT = os.getenv("COMPANY_CONTEXT", "software development") +ALLOWED_PATHS = os.getenv("ALLOWED_PATHS", "/workspace, /project") +COMPLIANCE_FRAMEWORKS = os.getenv("COMPLIANCE_FRAMEWORKS", "your security requirements") + +# IP-based access restriction +ALWAYS_ALLOWED_IPS = {"127.0.0.1", "localhost", "::1"} +_env_ips = os.getenv("ALLOWED_IPS", "").strip() +ALLOWED_IPS = set(ip.strip() for ip in _env_ips.split(",") if ip.strip()) if _env_ips else set() +print(f"\U0001f512 IP Restriction: localhost + 172.20.200.59 + 172.20.200.119 + 172.18.0.1 (internal) + {ALLOWED_IPS if ALLOWED_IPS else 'no external IPs'}") + +if not TASKBOARD_API_KEY: + print("\u26a0\ufe0f WARNING: TASKBOARD_API_KEY not set. API authentication disabled!") +if not OPENCLAW_TOKEN: + print("\u26a0\ufe0f WARNING: OPENCLAW_TOKEN not set. OPENCLAW integration disabled!") + +# File upload limits +MAX_ATTACHMENT_SIZE_MB = 10 +MAX_ATTACHMENT_SIZE_BYTES = MAX_ATTACHMENT_SIZE_MB * 1024 * 1024 + +# Specific Docker IPs allowed +ALLOWED_DOCKER_IPS = { + "172.20.200.59", + "172.20.200.119", + "172.18.0.1", +} + +# CORS allowed origins +ALLOWED_ORIGINS = [ + "{TASKBOARD_BASE_URL}", + "http://127.0.0.1:8080", + "http://localhost:3000", + "http://127.0.0.1:3000", +] +if TASKBOARD_BASE_URL not in ALLOWED_ORIGINS: + ALLOWED_ORIGINS.append(TASKBOARD_BASE_URL) + if TASKBOARD_BASE_URL.startswith("https://"): + http_variant = TASKBOARD_BASE_URL.replace("https://", "http://") + if http_variant not in ALLOWED_ORIGINS: + ALLOWED_ORIGINS.append(http_variant) diff --git a/app/database.py b/app/database.py new file mode 100644 index 0000000..2676cd5 --- /dev/null +++ b/app/database.py @@ -0,0 +1,186 @@ +""" +Database: connection manager, schema initialization, activity logging. 
+ +Concurrency model: +- SQLite WAL mode for concurrent reads +- BEGIN IMMEDIATE for write transactions (serializes writes) +- busy_timeout=30s to wait for locks instead of failing +- All writes go through get_db_write() which acquires IMMEDIATE lock +- Reads can use get_db() (no lock needed with WAL) +""" + +import logging +import sqlite3 +import threading +from contextlib import contextmanager +from datetime import datetime + +from app.config import DB_PATH + +logger = logging.getLogger(__name__) + +# Global write lock to serialize all database writes at the application level. +# This prevents SQLite BUSY errors when multiple async handlers try to write concurrently. +_write_lock = threading.Lock() + + +@contextmanager +def get_db(): + """Read-only database connection. Use get_db_write() for writes.""" + conn = sqlite3.connect(str(DB_PATH), timeout=30) + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("PRAGMA busy_timeout=30000") + conn.row_factory = sqlite3.Row + try: + yield conn + finally: + conn.close() + + +@contextmanager +def get_db_write(): + """ + Write-safe database connection with serialization. + + Acquires a thread lock + BEGIN IMMEDIATE to ensure: + 1. Only one write transaction at a time (app-level lock) + 2. SQLite write lock acquired immediately (not deferred) + 3. Auto-commit on success, auto-rollback on exception + """ + with _write_lock: + conn = sqlite3.connect(str(DB_PATH), timeout=30, isolation_level=None) + conn.execute("PRAGMA journal_mode=WAL") + conn.execute("PRAGMA busy_timeout=30000") + conn.row_factory = sqlite3.Row + try: + conn.execute("BEGIN IMMEDIATE") + yield conn + conn.execute("COMMIT") + except Exception: + try: + conn.execute("ROLLBACK") + except Exception: + pass + raise + finally: + conn.close() + + +def init_db(): + """Initialize the database.""" + with get_db() as conn: + conn.execute(""" + CREATE TABLE IF NOT EXISTS tasks ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + title TEXT NOT NULL, + description TEXT DEFAULT '', + status TEXT DEFAULT 'Backlog', + priority TEXT DEFAULT 'Medium', + agent TEXT DEFAULT 'Unassigned', + due_date TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + board TEXT DEFAULT 'tasks' + ) + """) + conn.execute(""" + CREATE TABLE IF NOT EXISTS activity_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id INTEGER, + action TEXT NOT NULL, + agent TEXT, + details TEXT, + timestamp TEXT NOT NULL + ) + """) + conn.execute(""" + CREATE TABLE IF NOT EXISTS comments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id INTEGER NOT NULL, + agent TEXT NOT NULL, + content TEXT NOT NULL, + created_at TEXT NOT NULL + ) + """) + conn.execute(""" + CREATE TABLE IF NOT EXISTS action_items ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id INTEGER NOT NULL, + comment_id INTEGER, + agent TEXT NOT NULL, + content TEXT NOT NULL, + item_type TEXT DEFAULT 'question', + resolved INTEGER DEFAULT 0, + created_at TEXT NOT NULL, + resolved_at TEXT + ) + """) + # Add columns if they don't exist + for alter in [ + "ALTER TABLE tasks ADD COLUMN working_agent TEXT DEFAULT NULL", + "ALTER TABLE tasks ADD COLUMN agent_session_key TEXT DEFAULT NULL", + "ALTER TABLE action_items ADD COLUMN archived INTEGER DEFAULT 0", + "ALTER TABLE tasks ADD COLUMN source_file TEXT DEFAULT NULL", + "ALTER TABLE tasks ADD COLUMN source_ref TEXT DEFAULT NULL", + ]: + try: + conn.execute(alter) + except Exception: + pass + + conn.execute(""" + CREATE TABLE IF NOT EXISTS chat_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_key TEXT DEFAULT 
'main', + role TEXT NOT NULL, + content TEXT NOT NULL, + attachments TEXT, + created_at TEXT NOT NULL + ) + """) + try: + conn.execute("ALTER TABLE chat_messages ADD COLUMN session_key TEXT DEFAULT 'main'") + except Exception: + pass + + # Drop legacy soft-delete table (sessions now deleted via WS RPC) + conn.execute("DROP TABLE IF EXISTS deleted_sessions") + + # Projects table + conn.execute(""" + CREATE TABLE IF NOT EXISTS projects ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + slug TEXT UNIQUE NOT NULL, + description TEXT DEFAULT '', + color TEXT DEFAULT '#00b4d8', + created_at TEXT NOT NULL + ) + """) + + # Add project_id to tasks if not exists + try: + conn.execute("ALTER TABLE tasks ADD COLUMN project_id INTEGER DEFAULT 1 REFERENCES projects(id)") + except Exception: + pass # Column already exists + + # Ensure default project exists + cursor = conn.execute("SELECT id FROM projects WHERE slug = 'default'") + if not cursor.fetchone(): + from datetime import datetime as dt, timezone + conn.execute( + "INSERT INTO projects (name, slug, description, color, created_at) VALUES (?, ?, ?, ?, ?)", + ("Default", "default", "Default project", "#00b4d8", dt.now(timezone.utc).isoformat()) + ) + + conn.commit() + + +def log_activity(task_id: int, action: str, agent: str = None, details: str = None): + """Log an activity with full audit trail.""" + with get_db_write() as conn: + conn.execute( + "INSERT INTO activity_log (task_id, action, agent, details, timestamp) VALUES (?, ?, ?, ?, ?)", + (task_id, action, agent, details, datetime.now().isoformat()) + ) + logger.info(f"ACTIVITY: task={task_id} action={action} agent={agent} details={details}") diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..94cae28 --- /dev/null +++ b/app/main.py @@ -0,0 +1,217 @@ +""" +RIZQ Task Board - FastAPI Backend +Main app: middleware, WebSocket endpoint, startup, static serving. 
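+
+Live-update clients connect to /ws. Minimal keepalive sketch (assumes the default
+TASKBOARD_BASE_URL and the `websockets` package already used by app/openclaw.py):
+
+    import asyncio, websockets
+
+    async def ping_once():
+        async with websockets.connect("ws://localhost:8080/ws") as ws:
+            await ws.send("ping")
+            assert await ws.recv() == "pong"
+
+    asyncio.run(ping_once())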
+""" + +import httpx +from datetime import datetime + +from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request +from fastapi.staticfiles import StaticFiles +from fastapi.responses import FileResponse, PlainTextResponse +from fastapi.middleware.cors import CORSMiddleware +from starlette.middleware.base import BaseHTTPMiddleware + +from app.config import ( + STATIC_PATH, ALLOWED_ORIGINS, ALWAYS_ALLOWED_IPS, ALLOWED_DOCKER_IPS, ALLOWED_IPS, + OPENCLAW_ENABLED, OPENCLAW_GATEWAY_URL, OPENCLAW_TOKEN, + ATTACHMENTS_PATH, DATA_DIR, + AGENT_AUTO_DETECT, AGENTS_ENV, + _populate_agents_from_openclaw, _build_agents_from_env, +) +from app.database import init_db +from app.websocket import manager +from app.routes import api_router + +# ============================================================================= +# APP +# ============================================================================= + +app = FastAPI(title="RIZQ Task Board", version="1.2.0") + +app.add_middleware( + CORSMiddleware, + allow_origins=ALLOWED_ORIGINS, + allow_credentials=True, + allow_methods=["GET", "POST", "PATCH", "DELETE"], + allow_headers=["Authorization", "X-API-Key", "Content-Type"], +) + + +# IP Restriction Middleware +class IPRestrictionMiddleware(BaseHTTPMiddleware): + """Block requests from IPs not in the allowed list.""" + + async def dispatch(self, request: Request, call_next): + client_ip = request.client.host if request.client else None + if client_ip in ALWAYS_ALLOWED_IPS: + return await call_next(request) + if client_ip in ALLOWED_DOCKER_IPS: + return await call_next(request) + if client_ip in ALLOWED_IPS: + return await call_next(request) + print(f"\U0001f6ab Blocked request from {client_ip} - not in allowed IPs") + return PlainTextResponse(f"Access denied. 
IP {client_ip} not authorized.", status_code=403) + + +app.add_middleware(IPRestrictionMiddleware) + + +# Request Logging Middleware +class RequestLoggingMiddleware(BaseHTTPMiddleware): + """Batch API requests and log summaries to reduce spam.""" + + def __init__(self, app): + super().__init__(app) + self.batch = [] + self.batch_window = 0.5 + self.last_flush = datetime.now().timestamp() + + async def dispatch(self, request: Request, call_next): + if request.url.path.startswith(("/static/", "/data/")): + return await call_next(request) + + path = request.url.path + method = request.method + client_ip = request.client.host if request.client else "unknown" + + start_time = datetime.now() + response = await call_next(request) + duration = (datetime.now() - start_time).total_seconds() + + if path.startswith("/api/"): + pattern = self._get_pattern(path) + task_id = self._extract_task_id(path) + self.batch.append({ + "method": method, "pattern": pattern, "path": path, + "task_id": task_id, "status": response.status_code, + "duration": duration, "client_ip": client_ip + }) + now = datetime.now().timestamp() + if now - self.last_flush >= self.batch_window and self.batch: + self._flush_batch() + elif method in ["POST", "PATCH", "DELETE", "PUT"]: + emoji = self._get_emoji(method, response.status_code) + status_emoji = "\u2705" if 200 <= response.status_code < 300 else "\u26a0\ufe0f" if response.status_code < 500 else "\u274c" + print(f"{emoji} {method} {path} - {status_emoji} {response.status_code} ({duration:.3f}s)") + + return response + + def _flush_batch(self): + if not self.batch: + return + groups = {} + for req in self.batch: + key = f"{req['method']}:{req['pattern']}" + if key not in groups: + groups[key] = {"requests": [], "task_ids": set(), "total_duration": 0, "errors": 0} + groups[key]["requests"].append(req) + if req["task_id"]: + groups[key]["task_ids"].add(req["task_id"]) + groups[key]["total_duration"] += req["duration"] + if req["status"] >= 400: + groups[key]["errors"] += 1 + total = len(self.batch) + print(f"\n\U0001f4ca {total} requests ({self.batch_window}s):") + for key, group in groups.items(): + method, pattern = key.split(":", 1) + count = len(group["requests"]) + task_ids = sorted(group["task_ids"]) + avg_duration = group["total_duration"] / count + emoji = self._get_emoji(method, 200) + task_info = f" [{','.join(map(str, task_ids[:10]))}{'...' 
if len(task_ids) > 10 else ''}]" if task_ids else "" + error_info = f" \u26a0\ufe0f{group['errors']}" if group['errors'] > 0 else "" + print(f" {emoji} {method} {pattern}: {count}x @ {avg_duration:.3f}s{task_info}{error_info}") + self.batch = [] + self.last_flush = datetime.now().timestamp() + + def _get_pattern(self, path: str) -> str: + import re + return re.sub(r'/\d+(/|$)', '/{id}\\1', path) + + def _extract_task_id(self, path: str) -> int: + if "/tasks/" in path: + parts = path.split("/tasks/") + if len(parts) > 1: + id_part = parts[1].split("/")[0] + if id_part.isdigit(): + return int(id_part) + return None + + def _get_emoji(self, method: str, status: int) -> str: + if status >= 400: + return "\u274c" + return {"GET": "\U0001f4d6", "POST": "\U0001f4dd", "PATCH": "\u270f\ufe0f", "PUT": "\U0001f4e4", "DELETE": "\U0001f5d1\ufe0f"}.get(method, "\U0001f537") + + +app.add_middleware(RequestLoggingMiddleware) + +# Include all API routes +app.include_router(api_router) + + +# Startup +@app.on_event("startup") +async def startup(): + init_db() + # Agent detection: ENV > Auto-detect from OpenClaw > Fallback defaults + if AGENTS_ENV: + _build_agents_from_env() + elif AGENT_AUTO_DETECT and OPENCLAW_ENABLED: + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json={"tool": "agents_list", "args": {}}, + headers={"Authorization": f"Bearer {OPENCLAW_TOKEN}", "Content-Type": "application/json"} + ) + result = response.json() if response.status_code == 200 else None + if result and result.get("ok"): + raw_result = result.get("result", {}) + details = raw_result.get("details", raw_result) + agents_data = details.get("agents", []) + if agents_data: + _populate_agents_from_openclaw(agents_data) + else: + print("\u26a0\ufe0f OpenClaw returned empty agents list, using fallback") + else: + print(f"\u26a0\ufe0f OpenClaw agents_list failed: {response.status_code}, using fallback") + except Exception as e: + print(f"\u26a0\ufe0f Could not fetch agents from OpenClaw: {e}, using fallback") + else: + if not AGENT_AUTO_DETECT: + print("ℹ️ AGENT_AUTO_DETECT=false, using fallback agents") + + +# Serve static files +STATIC_PATH.mkdir(exist_ok=True) +app.mount("/static", StaticFiles(directory=STATIC_PATH), name="static") + +# Serve data attachments +ATTACHMENTS_PATH.mkdir(exist_ok=True) +app.mount("/data/attachments", StaticFiles(directory=ATTACHMENTS_PATH), name="attachments") + + +@app.get("/") +def read_root(): + """Serve the Kanban UI.""" + return FileResponse(STATIC_PATH / "index.html") + + +# WebSocket endpoint +@app.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket): + """WebSocket for live updates.""" + await manager.connect(websocket) + try: + while True: + data = await websocket.receive_text() + if data == "ping": + await websocket.send_text("pong") + except WebSocketDisconnect: + manager.disconnect(websocket) + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8080) diff --git a/app/models.py b/app/models.py new file mode 100644 index 0000000..00ead82 --- /dev/null +++ b/app/models.py @@ -0,0 +1,162 @@ +""" +Pydantic models/schemas for request/response validation. 
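+
+Validation sketch ("NotAStatus" is a deliberately invalid example value):
+
+    TaskCreate(title="x", status="NotAStatus")  # raises ValidationError -> HTTP 422 from FastAPI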
+""" + +from typing import Optional, List +from pydantic import BaseModel, Field, field_validator + +from app.config import VALID_STATUSES, VALID_PRIORITIES, MAX_ATTACHMENT_SIZE_BYTES, MAX_ATTACHMENT_SIZE_MB + + +class TaskCreate(BaseModel): + title: str + description: str = "" + status: str = "Backlog" + priority: str = "Medium" + agent: str = "Unassigned" + due_date: Optional[str] = None + board: str = "tasks" + source_file: Optional[str] = None + source_ref: Optional[str] = None + project_id: int = 1 + + @field_validator('status') + @classmethod + def validate_status(cls, v): + if v not in VALID_STATUSES: + raise ValueError(f'Invalid status "{v}". Must be one of: {VALID_STATUSES}') + return v + + @field_validator('priority') + @classmethod + def validate_priority(cls, v): + if v not in VALID_PRIORITIES: + raise ValueError(f'Invalid priority "{v}". Must be one of: {VALID_PRIORITIES}') + return v + + +class TaskUpdate(BaseModel): + title: Optional[str] = None + description: Optional[str] = None + status: Optional[str] = None + priority: Optional[str] = None + agent: Optional[str] = None + due_date: Optional[str] = None + source_file: Optional[str] = None + source_ref: Optional[str] = None + project_id: Optional[int] = None + + @field_validator('status') + @classmethod + def validate_status(cls, v): + if v is not None and v not in VALID_STATUSES: + raise ValueError(f'Invalid status "{v}". Must be one of: {VALID_STATUSES}') + return v + + @field_validator('priority') + @classmethod + def validate_priority(cls, v): + if v is not None and v not in VALID_PRIORITIES: + raise ValueError(f'Invalid priority "{v}". Must be one of: {VALID_PRIORITIES}') + return v + + +class Task(BaseModel): + id: int + title: str + description: str + status: str + priority: str + agent: str + due_date: Optional[str] + created_at: str + updated_at: str + board: str + source_file: Optional[str] = None + source_ref: Optional[str] = None + working_agent: Optional[str] = None + agent_session_key: Optional[str] = None + project_id: int = 1 + + +class MoveRequest(BaseModel): + status: str + agent: str = None + reason: str = None + + +class CommentCreate(BaseModel): + agent: str + content: str + + @field_validator('content') + @classmethod + def validate_content_size(cls, v): + if len(v) > MAX_ATTACHMENT_SIZE_BYTES: + raise ValueError(f'Content exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') + return v + + @field_validator('agent') + @classmethod + def validate_agent(cls, v): + if len(v) > 100: + raise ValueError('Agent name too long') + return v + + +class ActionItemCreate(BaseModel): + agent: str + content: str + item_type: str = "question" + comment_id: Optional[int] = None + + +class ImageUpload(BaseModel): + data: str + filename: Optional[str] = "image" + + +class JarvisMessage(BaseModel): + message: str + session: str = "main" + attachments: Optional[List[dict]] = None + + @field_validator('message') + @classmethod + def validate_message_size(cls, v): + if len(v) > MAX_ATTACHMENT_SIZE_BYTES: + raise ValueError(f'Message exceeds maximum size of {MAX_ATTACHMENT_SIZE_MB}MB') + return v + + +class JarvisResponse(BaseModel): + response: str + session: str = "main" + + @field_validator('response') + @classmethod + def validate_response_size(cls, v): + if len(v) > 1024 * 1024: + raise ValueError('Response too large') + return v + + +class ProjectCreate(BaseModel): + name: str = Field(..., min_length=1, max_length=100) + description: str = Field(default="", max_length=500) + color: str = Field(default="#00b4d8", 
pattern=r"^#[0-9a-fA-F]{6}$") + + +class ProjectResponse(BaseModel): + id: int + name: str + slug: str + description: str + color: str + created_at: str + + +class SessionCreate(BaseModel): + label: str = None + agentId: str = "main" + task: str = "New session started from Task Board. Awaiting instructions." diff --git a/app/openclaw.py b/app/openclaw.py new file mode 100644 index 0000000..d972772 --- /dev/null +++ b/app/openclaw.py @@ -0,0 +1,719 @@ +""" +OpenClaw/agent integration: spawn, send, stop, prompts, guardrails. +""" + +import asyncio +import httpx +import json as _json +import uuid +import websockets +from typing import Optional +from datetime import datetime + +from app.config import ( + OPENCLAW_ENABLED, OPENCLAW_GATEWAY_URL, OPENCLAW_TOKEN, + TASKBOARD_BASE_URL, AGENT_TO_OPENCLAW_ID, + MAIN_AGENT_NAME, MAIN_AGENT_EMOJI, HUMAN_SUPERVISOR_LABEL, + PROJECT_NAME, COMPANY_NAME, COMPANY_CONTEXT, + ALLOWED_PATHS, COMPLIANCE_FRAMEWORKS, +) +from app.database import get_db + + +# ============================================================================= +# SECURITY HELPERS (notify, send, session management) +# ============================================================================= + +async def notify_OPENCLAW(task_id: int, task_title: str, comment_agent: str, comment_content: str): + """Send webhook to OpenClaw when a comment needs attention.""" + if not OPENCLAW_ENABLED or comment_agent == MAIN_AGENT_NAME: + return + try: + async with httpx.AsyncClient(timeout=5.0) as client: + payload = { + "action": "wake", + "text": f"\U0001f4ac Task Board: New comment on #{task_id} ({task_title}) from {comment_agent}:\n\n{comment_content[:200]}{'...' if len(comment_content) > 200 else ''}\n\nCheck and respond: {TASKBOARD_BASE_URL}" + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + await client.post(f"{OPENCLAW_GATEWAY_URL}/api/cron/wake", json=payload, headers=headers) + print(f"Notified OPENCLAW about comment from {comment_agent}") + except Exception as e: + print(f"Webhook to OPENCLAW failed: {e}") + + +async def send_to_agent_session(session_key: str, message: str) -> bool: + """Send a follow-up message to an active agent session.""" + if not OPENCLAW_ENABLED or not session_key: + return False + try: + async with httpx.AsyncClient(timeout=30.0) as client: + payload = { + "tool": "sessions_send", + "args": { + "sessionKey": session_key, + "message": message + } + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json=payload, + headers=headers + ) + result = response.json() if response.status_code == 200 else None + if result and result.get("ok"): + print(f"\u2705 Sent message to session {session_key}") + return True + else: + print(f"\u274c Failed to send to session: {response.text}") + return False + except Exception as e: + print(f"\u274c Failed to send to agent session: {e}") + return False + + +def get_task_session(task_id: int) -> Optional[str]: + """Get the active agent session key for a task.""" + with get_db() as conn: + row = conn.execute("SELECT agent_session_key FROM tasks WHERE id = ?", (task_id,)).fetchone() + return row["agent_session_key"] if row and row["agent_session_key"] else None + + +def set_task_session(task_id: int, session_key: Optional[str]): + """Set or clear the agent session key for a task.""" + from app.database import get_db_write + with get_db_write() as conn: + 
conn.execute( + "UPDATE tasks SET agent_session_key = ?, updated_at = ? WHERE id = ?", + (session_key, datetime.now().isoformat(), task_id) + ) + + +# ============================================================================= +# GUARDRAILS & PROMPTS +# ============================================================================= + +AGENT_GUARDRAILS = f""" +\u26a0\ufe0f MANDATORY CONSTRAINTS (Approved by User via Task Board assignment): + +FILESYSTEM BOUNDARIES: +- ONLY access: {ALLOWED_PATHS} +- Everything else is FORBIDDEN without explicit authorization + +FORBIDDEN ACTIONS (do not attempt without approval): +- Browser tool (except UX Manager on localhost only) +- git commit (requires safeword from User) +- Any action outside the authorized paths + +WEB_FETCH (requires approval): +- You have web_fetch available but MUST ask User first +- Create an action item (type: question) explaining what URL you need and why +- Wait for User to resolve the action item before fetching +- Only fetch after explicit approval + +COMPLIANCE CONTEXT: +- {COMPANY_NAME}, {COMPANY_CONTEXT} +- {COMPLIANCE_FRAMEWORKS} +- Security over convenience \u2014 always + +COMMUNICATION & ESCALATION: +- Post comments on the task card to communicate +- Create action items for questions that need answers (type: question) +- Create action items for blockers (type: blocker) + +ESCALATION CHAIN: +1. {MAIN_AGENT_NAME} (coordinator) monitors your action items and may answer if confident +2. If {MAIN_AGENT_NAME} answers, the item gets resolved and you can proceed +3. If {MAIN_AGENT_NAME} is unsure, they leave it for {HUMAN_SUPERVISOR_LABEL} to review +4. {HUMAN_SUPERVISOR_LABEL} has final authority on all decisions + +TASK BOARD INTEGRATION: +- Use start-work API when beginning: POST {TASKBOARD_BASE_URL}/api/tasks/{{task_id}}/start-work?agent={{your_name}} +- Post updates as comments: POST {TASKBOARD_BASE_URL}/api/tasks/{{task_id}}/comments (json: {{"agent": "your_name", "content": "message"}}) +- Create action items for questions: POST {TASKBOARD_BASE_URL}/api/tasks/{{task_id}}/action-items (json: {{"agent": "your_name", "content": "question", "item_type": "question"}}) +- Move to Review when done: POST {TASKBOARD_BASE_URL}/api/tasks/{{task_id}}/move?status=Review&agent={{your_name}}&reason=... +- Use stop-work API when finished: POST {TASKBOARD_BASE_URL}/api/tasks/{{task_id}}/stop-work + +REPORT FORMAT: +When complete, post a comment with your findings using this format: +## [Your Role] Report +**Task:** [task title] +**Verdict:** \u2705 APPROVED / \u26a0\ufe0f CONCERNS / \U0001f6d1 BLOCKED +### Findings +- [SEVERITY] Issue description +### Summary +[1-2 sentence assessment] +""" + +AGENT_SYSTEM_PROMPTS = { + "main": f"""You are {MAIN_AGENT_NAME}, the primary coordinator for {COMPANY_NAME}. + +Your focus: +- General task implementation and coordination +- Code writing and debugging +- Cross-cutting concerns that don't fit specialist roles +- Synthesizing input from other agents +- Direct implementation work + +Project: {PROJECT_NAME} +You're the hands-on executor. When assigned a task, dig in and get it done.""", + + "architect": f"""You are the Architect for {COMPANY_NAME}. + +Your focus: +- System design and architectural patterns +- Scalability and performance implications +- Technical trade-offs and recommendations +- Integration architecture +- Database design and data modeling + +Project: {PROJECT_NAME} +Be concise. 
Flag concerns with severity (CRITICAL/HIGH/MEDIUM/LOW).""", + + "security-auditor": f"""You are the Security Auditor for {COMPANY_NAME}. + +Your focus: +- SOC2 Trust Services Criteria (Security, Availability, Confidentiality, Privacy) +- HIPAA compliance (PHI handling, access controls, audit logging) +- CIS Controls benchmarks +- OWASP Top 10 vulnerabilities +- Secure credential storage and handling +- Tenant data isolation (multi-tenant SaaS) + +NON-NEGOTIABLE: Security over convenience. Always. +Rate findings: CRITICAL (blocks deploy) / HIGH / MEDIUM / LOW""", + + "code-reviewer": f"""You are the Code Reviewer for {COMPANY_NAME}. + +Your focus: +- Code quality and best practices +- DRY, SOLID principles +- Error handling and edge cases +- Performance considerations +- Code readability and maintainability +- Test coverage gaps + +Project: {PROJECT_NAME} +Format: MUST FIX / SHOULD FIX / CONSIDER / NICE TO HAVE""", + + "ux-manager": f"""You are the UX Manager for {COMPANY_NAME}. + +Your focus: +- User flow clarity and efficiency +- Error message helpfulness +- Form design and validation feedback +- UI consistency across the platform +- Accessibility basics +- Onboarding experience + +Project: {PROJECT_NAME} + +BROWSER ACCESS (localhost only): +You have browser access to review the app UI. Use it to: +- Take snapshots of pages to analyze layout, spacing, colors +- Check user flows and navigation +- Verify form designs and error states +- Assess overall visual consistency + +ALLOWED URLs (localhost only): +- http://localhost:* (any port) +- http://127.0.0.1:* + +DO NOT navigate to any external URLs. Your browser access is strictly for reviewing the local app.""" +} + +async def is_session_alive(session_key: str) -> bool: + """Check if an OpenClaw session is still active via sessions_list.""" + if not OPENCLAW_ENABLED or not session_key: + return False + try: + async with httpx.AsyncClient(timeout=10.0) as client: + payload = { + "tool": "sessions_list", + "args": {} + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json=payload, + headers=headers + ) + result = response.json() if response.status_code == 200 else None + if result and result.get("ok"): + sessions = result.get("result", []) + # Handle various response formats + if isinstance(sessions, dict): + # Could be {"sessions": [...]} or {"content": [...]} + sessions = sessions.get("sessions", sessions.get("content", [])) + # If result is a list of content blocks (tool response format) + if isinstance(sessions, list) and len(sessions) > 0 and isinstance(sessions[0], dict) and "text" in sessions[0]: + import json as _json + try: + parsed = _json.loads(sessions[0]["text"]) + if isinstance(parsed, list): + sessions = parsed + elif isinstance(parsed, dict): + sessions = parsed.get("sessions", []) + except (ValueError, KeyError): + pass + print(f"🔍 is_session_alive: Looking for {session_key} in {len(sessions)} sessions") + for s in sessions: + key = s.get("sessionKey") or s.get("key") or s.get("id", "") + if key == session_key: + status = s.get("status", "").lower() + print(f"🔍 is_session_alive: Found! 
Status={status}") + return status not in ("stopped", "dead", "terminated", "error") + print(f"🔍 is_session_alive: Session {session_key} NOT found in list") + else: + print(f"🔍 is_session_alive: API response not ok: {result}") + return False + except Exception as e: + print(f"⚠️ is_session_alive check failed: {e}") + return False + + +def _get_ws_url() -> str: + """Derive WebSocket URL from gateway HTTP URL.""" + url = OPENCLAW_GATEWAY_URL.replace("http://", "ws://").replace("https://", "wss://") + return url.rstrip("/") + "/ws" + + +async def _ws_rpc(method: str, params: dict, timeout: float = 10.0) -> dict: + """Send a single JSON-RPC-style request over WebSocket to OpenClaw gateway.""" + ws_url = _get_ws_url() + req_id = str(uuid.uuid4()) + msg = _json.dumps({"type": "req", "id": req_id, "method": method, "params": params}) + print(f"🔌 WS-RPC: Connecting to {ws_url}") + print(f"🔌 WS-RPC: Sending {method} → {_json.dumps(params)[:200]}") + async with websockets.connect( + ws_url, + origin=OPENCLAW_GATEWAY_URL, + open_timeout=5, + close_timeout=5, + ) as ws: + # Step 1: Wait for connect.challenge event from server + nonce = None + while True: + raw = await asyncio.wait_for(ws.recv(), timeout=5) + data = _json.loads(raw) + print(f"🔌 WS-RPC: Recv type={data.get('type')} event={data.get('event', '')}") + if data.get("type") == "event" and data.get("event") == "connect.challenge": + nonce = data.get("payload", {}).get("nonce", "") + print(f"🔌 WS-RPC: Got challenge nonce={nonce[:8]}...") + break + + # Step 2: Send connect with auth token + connect_id = str(uuid.uuid4()) + connect_msg = _json.dumps({ + "type": "req", + "id": connect_id, + "method": "connect", + "params": { + "minProtocol": 3, + "maxProtocol": 3, + "client": {"id": "openclaw-control-ui", "version": "dev", "platform": "linux", "mode": "webchat"}, + "role": "operator", + "scopes": ["operator.admin", "operator.approvals", "operator.pairing"], + "auth": {"token": OPENCLAW_TOKEN}, + } + }) + await ws.send(connect_msg) + print(f"🔌 WS-RPC: Sent connect (id={connect_id[:8]}...)") + + # Step 3: Wait for connect response + while True: + raw = await asyncio.wait_for(ws.recv(), timeout=5) + data = _json.loads(raw) + print(f"🔌 WS-RPC: Handshake recv type={data.get('type')} ok={data.get('ok')}") + if data.get("id") == connect_id: + if data.get("ok") is False: + print(f"🔌 WS-RPC: Connect failed: {data.get('error')}") + return {} + print(f"🔌 WS-RPC: Connected!") + break + + # Step 4: Send actual request + await ws.send(msg) + print(f"🔌 WS-RPC: Sent {method} (id={req_id[:8]}...)") + deadline = asyncio.get_event_loop().time() + timeout + while asyncio.get_event_loop().time() < deadline: + try: + raw = await asyncio.wait_for(ws.recv(), timeout=timeout) + data = _json.loads(raw) + msg_type = data.get("type", "") + print(f"🔌 WS-RPC: Recv type={msg_type} id={str(data.get('id',''))[:8]}") + if data.get("id") == req_id: + print(f"🔌 WS-RPC: Response: {_json.dumps(data)[:500]}") + return data + except asyncio.TimeoutError: + print(f"🔌 WS-RPC: Timeout waiting for response") + break + print(f"🔌 WS-RPC: No matching response received") + return {} + + +async def stop_agent_session(session_key: str) -> bool: + """Stop an OpenClaw agent session via WebSocket RPC sessions.delete.""" + if not OPENCLAW_ENABLED or not session_key: + print(f"⚠️ stop_agent_session: Skipped (enabled={OPENCLAW_ENABLED}, key={session_key})") + return False + print(f"🛑 stop_agent_session: Deleting session {session_key}") + try: + result = await _ws_rpc("sessions.delete", {"key": 
session_key, "deleteTranscript": True}) + if result.get("type") == "resp" or result.get("result") is not None: + print(f"✅ Deleted session {session_key} via WS RPC") + return True + elif result.get("error"): + print(f"❌ WS RPC sessions.delete error: {_json.dumps(result.get('error'))}") + return False + else: + print(f"⚠️ WS RPC sessions.delete — no matching response: {_json.dumps(result)[:300]}") + return False + except Exception as e: + print(f"❌ Failed to delete agent session via WS: {type(e).__name__}: {e}") + return False + + +_spawning_tasks = set() + + +async def spawn_agent_session(task_id: int, task_title: str, task_description: str, agent_name: str): + """Spawn an OpenClaw sub-agent session for a task via tools/invoke API.""" + print(f"\U0001f680 SPAWN-AGENT: Task #{task_id} | Agent: {agent_name}") + + if task_id in _spawning_tasks: + print(f"\u23e9 SPAWN-AGENT SKIPPED: Already spawning for task #{task_id}") + return None + _spawning_tasks.add(task_id) + + try: + return await _do_spawn_agent_session(task_id, task_title, task_description, agent_name) + finally: + _spawning_tasks.discard(task_id) + + +async def _do_spawn_agent_session(task_id: int, task_title: str, task_description: str, agent_name: str): + """Internal spawn implementation.""" + if not OPENCLAW_ENABLED: + print(f"\u26a0\ufe0f SPAWN-AGENT SKIPPED: OpenClaw not enabled (OPENCLAW_TOKEN not set)") + return None + + # Guard against double-spawn: check if session already exists and is alive + existing_key = get_task_session(task_id) + if existing_key: + alive = await is_session_alive(existing_key) + if alive: + print(f"ℹ️ SPAWN-AGENT SKIPPED: Task #{task_id} already has live session {existing_key}") + return None + else: + print(f"🧹 SPAWN-AGENT: Clearing dead session {existing_key} for task #{task_id}") + set_task_session(task_id, None) + + agent_id = AGENT_TO_OPENCLAW_ID.get(agent_name) + if not agent_id: + print(f"\u26a0\ufe0f SPAWN-AGENT SKIPPED: Unknown agent '{agent_name}' (not in AGENT_TO_OPENCLAW_ID)") + return None + + print(f"\U0001f50d SPAWN-AGENT: Mapped {agent_name} \u2192 {agent_id}") + + system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") + task_prompt = f"""# Task Assignment from RIZQ Task Board (Approved by {HUMAN_SUPERVISOR_LABEL}) + +**Task #{task_id}:** {task_title} + +**Description:** +{task_description or 'No description provided.'} + +{AGENT_GUARDRAILS} + +## Your Role +{system_prompt} + +--- + +## API Base URL (MANDATORY \u2014 do NOT use localhost or 127.0.0.1) +All Task Board API calls MUST use this base URL: {TASKBOARD_BASE_URL} +Do NOT use localhost, 127.0.0.1, or any other address. The task board is ONLY reachable at {TASKBOARD_BASE_URL}. + +## Instructions +1. Call start-work API: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/start-work?agent={agent_name} + - This auto-moves the card to "In Progress" if needed +2. Analyze the task thoroughly +3. Post your findings as a comment: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/comments (json: {{"agent": "{agent_name}", "content": "your message"}}) +4. When done, call stop-work with outcome: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/stop-work?agent={agent_name}&outcome=review&reason= + - Use outcome=review when work is complete (auto-moves to Review) + - Use outcome=blocked&reason= if you need input (auto-moves to Blocked) + +## IMPORTANT: Stay Available +After posting your findings, **remain available for follow-up questions**. User may reply with questions or requests for clarification. 
When you receive a message starting with "\U0001f4ac **User replied**", respond thoughtfully and post your response as a comment on the task. + +Your session will automatically end when User marks the task as Done. + +Begin now. +""" + + try: + async with httpx.AsyncClient(timeout=60.0) as client: + payload = { + "tool": "sessions_spawn", + "args": { + "agentId": agent_id, + "task": task_prompt, + "label": f"task-{task_id}", + "cleanup": "keep" + } + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + + print(f"\U0001f4e1 SPAWN-AGENT: Calling OpenClaw API - {OPENCLAW_GATEWAY_URL}/tools/invoke") + print(f"\U0001f4e6 SPAWN-AGENT: Payload - tool: sessions_spawn, agentId: {agent_id}, label: task-{task_id}") + + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json=payload, + headers=headers + ) + + print(f"\U0001f4e5 SPAWN-AGENT: Response status: {response.status_code}") + + result = response.json() if response.status_code == 200 else None + print(f"\U0001f4e5 SPAWN-AGENT: Response body: {result}") + + if result and result.get("ok"): + raw_result = result.get("result", {}) + spawn_info = raw_result.get("details", raw_result) + run_id = spawn_info.get("runId", "unknown") + session_key = spawn_info.get("childSessionKey", None) + + print(f"\u2705 SPAWN-AGENT SUCCESS: {agent_name} ({agent_id}) for task #{task_id}") + print(f"\U0001f4cb SPAWN-AGENT: Session key: {session_key} | Run ID: {run_id}") + + if session_key: + print(f"\U0001f4be SPAWN-AGENT: Saving session key to database") + set_task_session(task_id, session_key) + else: + print(f"\u26a0\ufe0f SPAWN-AGENT: No session key in response!") + + print(f"\U0001f4ac SPAWN-AGENT: Posting spawn notification comment") + async with httpx.AsyncClient(timeout=5.0) as comment_client: + await comment_client.post( + f"{TASKBOARD_BASE_URL}/api/tasks/{task_id}/comments", + json={ + "agent": "System", + "content": f"\U0001f916 **{agent_name}** agent spawned automatically.\n\nSession: `{session_key or 'unknown'}`\nRun ID: `{run_id}`\n\n\U0001f4ac *Reply to this task and the agent will respond.*" + } + ) + print(f"\u2705 SPAWN-AGENT COMPLETE") + return result + else: + error_msg = response.text if response.status_code != 200 else result + print(f"\u274c SPAWN-AGENT FAILED: Status {response.status_code}") + print(f"\u274c SPAWN-AGENT ERROR: {error_msg}") + return None + except Exception as e: + print(f"\u274c SPAWN-AGENT EXCEPTION: {type(e).__name__}: {e}") + import traceback + print(f"\u274c SPAWN-AGENT TRACEBACK: {traceback.format_exc()}") + return None + + +async def spawn_followup_session(task_id: int, task_title: str, agent_name: str, previous_context: str, new_message: str): + """Spawn a follow-up session for an agent with conversation context.""" + if not OPENCLAW_ENABLED: + return None + + # In-flight guard: reuse same set as spawn_agent_session + if task_id in _spawning_tasks: + print(f"⏩ FOLLOWUP SKIPPED: Already spawning for task #{task_id}") + return None + + # Double-spawn guard: check if session already exists and is alive + existing_key = get_task_session(task_id) + if existing_key: + alive = await is_session_alive(existing_key) + if alive: + print(f"ℹ️ FOLLOWUP SKIPPED: Task #{task_id} already has live session {existing_key}, sending message instead") + sent = await send_to_agent_session(existing_key, f"💬 **User replied on Task #{task_id}:**\n\n{new_message}\n\n---\nRespond by posting a comment to the task.") + if sent: + return None + # If send failed, session is dead 
— clear and continue to spawn + print(f"🧹 FOLLOWUP: Session {existing_key} unresponsive, clearing") + set_task_session(task_id, None) + + _spawning_tasks.add(task_id) + try: + return await _do_spawn_followup(task_id, task_title, agent_name, previous_context, new_message) + finally: + _spawning_tasks.discard(task_id) + + +async def _do_spawn_followup(task_id: int, task_title: str, agent_name: str, previous_context: str, new_message: str): + agent_id = AGENT_TO_OPENCLAW_ID.get(agent_name) + if not agent_id: + return None + + system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") + + followup_prompt = f"""# Follow-up on Task #{task_id}: {task_title} + +You previously worked on this task and moved it to Review. User has a follow-up question. + +## Previous Conversation: +{previous_context if previous_context else "(No previous messages)"} + +## User's New Message: +{new_message} + +## Your Role: +{system_prompt} + +## API Base URL (MANDATORY \u2014 do NOT use localhost or 127.0.0.1) +All Task Board API calls MUST use this base URL: {TASKBOARD_BASE_URL} + +## Instructions: +1. Call start-work API: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/start-work?agent={agent_name} +2. Read the context and User's question +3. Respond helpfully by posting a comment: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/comments (json: {{"agent": "{agent_name}", "content": "your message"}}) +4. Keep your response focused on what User asked +5. Call stop-work API: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/stop-work?agent={agent_name} + - Add &outcome=review&reason= if work is complete + - Add &outcome=blocked&reason= if you need more input + +Respond now. +""" + + try: + async with httpx.AsyncClient(timeout=60.0) as client: + payload = { + "tool": "sessions_spawn", + "args": { + "agentId": agent_id, + "task": followup_prompt, + "label": f"task-{task_id}-followup", + "cleanup": "keep" + } + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json=payload, + headers=headers + ) + result = response.json() if response.status_code == 200 else None + if result and result.get("ok"): + raw_result = result.get("result", {}) + spawn_info = raw_result.get("details", raw_result) + session_key = spawn_info.get("childSessionKey", None) + if session_key: + set_task_session(task_id, session_key) + print(f"\u2705 Spawned follow-up session for {agent_name} on task #{task_id}") + return result + else: + print(f"\u274c Failed to spawn follow-up: {response.text}") + return None + except Exception as e: + print(f"\u274c Failed to spawn follow-up session: {e}") + return None + + +async def spawn_mentioned_agent(task_id: int, task_title: str, task_description: str, + mentioned_agent: str, mentioner: str, comment_content: str, + previous_context: str = ""): + """Spawn a session for an @mentioned agent to contribute to a task they don't own.""" + if not OPENCLAW_ENABLED: + return None + + agent_id = AGENT_TO_OPENCLAW_ID.get(mentioned_agent) + if not agent_id: + return None + + system_prompt = AGENT_SYSTEM_PROMPTS.get(agent_id, "") + + mention_prompt = f"""# You've Been Tagged: Task #{task_id} + +**{mentioner}** mentioned you on a task and needs your input. 
+ +## Task: {task_title} +{task_description or '(No description)'} + +## What {mentioner} Said: +{comment_content} + +## Previous Conversation: +{previous_context if previous_context else "(No prior comments)"} + +## Your Role: +{system_prompt} + +## Instructions: +1. Call start-work API: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/start-work?agent={mentioned_agent} +2. Review the task from YOUR perspective ({mentioned_agent}) +3. Post your findings/response as a comment: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/comments +4. Call stop-work API: POST {TASKBOARD_BASE_URL}/api/tasks/{task_id}/stop-work?agent={mentioned_agent} + +**Note:** You are NOT the assigned owner of this task. You're providing your expertise because you were tagged. +Do NOT move the task (no outcome param) \u2014 that's the owner's job. + +{AGENT_GUARDRAILS} + +Respond now with your assessment. +""" + + try: + async with httpx.AsyncClient(timeout=60.0) as client: + payload = { + "tool": "sessions_spawn", + "args": { + "agentId": agent_id, + "task": mention_prompt, + "label": f"task-{task_id}-mention-{agent_id}", + "cleanup": "delete" + } + } + headers = { + "Authorization": f"Bearer {OPENCLAW_TOKEN}", + "Content-Type": "application/json" + } + response = await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json=payload, + headers=headers + ) + result = response.json() if response.status_code == 200 else None + if result and result.get("ok"): + raw_result = result.get("result", {}) + spawn_info = raw_result.get("details", raw_result) + session_key = spawn_info.get("childSessionKey", "unknown") + + async with httpx.AsyncClient(timeout=5.0) as comment_client: + await comment_client.post( + f"{TASKBOARD_BASE_URL}/api/tasks/{task_id}/comments", + json={ + "agent": "System", + "content": f"\U0001f4e2 **{mentioned_agent}** was tagged by {mentioner} and is now reviewing this task." + } + ) + + print(f"\u2705 Spawned {mentioned_agent} for mention on task #{task_id}") + return result + else: + print(f"\u274c Failed to spawn {mentioned_agent} for mention: {response.text}") + return None + except Exception as e: + print(f"\u274c Failed to spawn mentioned agent: {e}") + return None diff --git a/app/routes/__init__.py b/app/routes/__init__.py new file mode 100644 index 0000000..fa17a21 --- /dev/null +++ b/app/routes/__init__.py @@ -0,0 +1,22 @@ +""" +Collects all APIRouters into a single api_router. +""" + +from fastapi import APIRouter + +from app.routes.tasks import router as tasks_router +from app.routes.comments import router as comments_router +from app.routes.action_items import router as action_items_router +from app.routes.sessions import router as sessions_router +from app.routes.chat import router as chat_router +from app.routes.uploads import router as uploads_router +from app.routes.projects import router as projects_router + +api_router = APIRouter() +api_router.include_router(tasks_router) +api_router.include_router(comments_router) +api_router.include_router(action_items_router) +api_router.include_router(sessions_router) +api_router.include_router(chat_router) +api_router.include_router(uploads_router) +api_router.include_router(projects_router) diff --git a/app/routes/action_items.py b/app/routes/action_items.py new file mode 100644 index 0000000..91e3802 --- /dev/null +++ b/app/routes/action_items.py @@ -0,0 +1,124 @@ +""" +Action items: CRUD, resolve/unresolve, archive/unarchive. 
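+
+Lifecycle sketch (IDs are illustrative):
+
+    POST /api/tasks/42/action-items     -> create (item_type defaults to "question")
+    POST /api/action-items/7/resolve    -> mark resolved
+    POST /api/action-items/7/archive    -> hide from the default listing (fetch with ?archived=true)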
+""" + +import logging +from datetime import datetime +from fastapi import APIRouter, HTTPException + +from app.database import get_db, get_db_write +from app.models import ActionItemCreate +from app.websocket import manager + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/api/tasks/{task_id}/action-items") +def get_action_items(task_id: int, resolved: bool = False, archived: bool = False): + """Get action items for a task.""" + with get_db() as conn: + if archived: + rows = conn.execute( + "SELECT * FROM action_items WHERE task_id = ? AND archived = 1 ORDER BY created_at ASC", + (task_id,) + ).fetchall() + else: + rows = conn.execute( + "SELECT * FROM action_items WHERE task_id = ? AND resolved = ? AND archived = 0 ORDER BY created_at ASC", + (task_id, 1 if resolved else 0) + ).fetchall() + return [dict(row) for row in rows] + + +@router.post("/api/tasks/{task_id}/action-items") +async def add_action_item(task_id: int, item: ActionItemCreate): + """Add an action item to a task.""" + now = datetime.now().isoformat() + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + cursor = conn.execute( + "INSERT INTO action_items (task_id, comment_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?, ?)", + (task_id, item.comment_id, item.agent, item.content, item.item_type, now) + ) + result = { + "id": cursor.lastrowid, "task_id": task_id, "comment_id": item.comment_id, + "agent": item.agent, "content": item.content, "item_type": item.item_type, + "resolved": 0, "created_at": now, "resolved_at": None + } + logger.info(f"Action item added on task #{task_id} by {item.agent}: {item.content[:50]}") + await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": result}) + return result + + +@router.post("/api/action-items/{item_id}/resolve") +async def resolve_action_item(item_id: int): + """Resolve an action item.""" + now = datetime.now().isoformat() + with get_db_write() as conn: + row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Action item not found") + conn.execute("UPDATE action_items SET resolved = 1, resolved_at = ? 
WHERE id = ?", (now, item_id)) + task_id = row["task_id"] + logger.info(f"Action item #{item_id} resolved on task #{task_id}") + await manager.broadcast({"type": "action_item_resolved", "task_id": task_id, "item_id": item_id}) + return {"success": True, "item_id": item_id} + + +@router.post("/api/action-items/{item_id}/unresolve") +async def unresolve_action_item(item_id: int): + """Unresolve an action item.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Action item not found") + conn.execute("UPDATE action_items SET resolved = 0, resolved_at = NULL WHERE id = ?", (item_id,)) + task_id = row["task_id"] + logger.info(f"Action item #{item_id} unresolved on task #{task_id}") + await manager.broadcast({"type": "action_item_unresolved", "task_id": task_id, "item_id": item_id}) + return {"success": True, "item_id": item_id} + + +@router.post("/api/action-items/{item_id}/archive") +async def archive_action_item(item_id: int): + """Archive a resolved action item.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Action item not found") + conn.execute("UPDATE action_items SET archived = 1 WHERE id = ?", (item_id,)) + task_id = row["task_id"] + logger.info(f"Action item #{item_id} archived on task #{task_id}") + await manager.broadcast({"type": "action_item_archived", "task_id": task_id, "item_id": item_id}) + return {"success": True, "item_id": item_id} + + +@router.post("/api/action-items/{item_id}/unarchive") +async def unarchive_action_item(item_id: int): + """Unarchive an action item.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Action item not found") + conn.execute("UPDATE action_items SET archived = 0 WHERE id = ?", (item_id,)) + task_id = row["task_id"] + logger.info(f"Action item #{item_id} unarchived on task #{task_id}") + await manager.broadcast({"type": "action_item_unarchived", "task_id": task_id, "item_id": item_id}) + return {"success": True, "item_id": item_id} + + +@router.delete("/api/action-items/{item_id}") +async def delete_action_item(item_id: int): + """Delete an action item.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM action_items WHERE id = ?", (item_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Action item not found") + task_id = row["task_id"] + conn.execute("DELETE FROM action_items WHERE id = ?", (item_id,)) + logger.info(f"Action item #{item_id} deleted from task #{task_id}") + await manager.broadcast({"type": "action_item_deleted", "task_id": task_id, "item_id": item_id}) + return {"success": True, "item_id": item_id} diff --git a/app/routes/chat.py b/app/routes/chat.py new file mode 100644 index 0000000..0cc2f4b --- /dev/null +++ b/app/routes/chat.py @@ -0,0 +1,214 @@ +""" +Chat: Jarvis history, chat, respond endpoints + legacy endpoints. 
+""" + +import json +import logging +import secrets +from datetime import datetime +from fastapi import APIRouter, Depends, Header, HTTPException + +import httpx + +from app.config import ( + OPENCLAW_ENABLED, OPENCLAW_GATEWAY_URL, OPENCLAW_TOKEN, + TASKBOARD_API_KEY, DATA_DIR, +) +from app.database import get_db, get_db_write +from app.models import JarvisMessage, JarvisResponse +from app.websocket import manager + +logger = logging.getLogger(__name__) +router = APIRouter() + + +def verify_api_key(authorization: str = Header(None), x_api_key: str = Header(None)): + """Verify API key from Authorization header or X-API-Key header.""" + if not TASKBOARD_API_KEY: + return True + if authorization: + if authorization.startswith("Bearer "): + token = authorization[7:] + if secrets.compare_digest(token, TASKBOARD_API_KEY): + return True + if x_api_key: + if secrets.compare_digest(x_api_key, TASKBOARD_API_KEY): + return True + raise HTTPException(status_code=401, detail="Invalid or missing API key") + + +@router.get("/api/jarvis/history") +def get_chat_history(limit: int = 100, session: str = "main"): + """Get command bar chat history from database, filtered by session.""" + with get_db() as conn: + rows = conn.execute( + "SELECT id, session_key, role, content, attachments, created_at FROM chat_messages WHERE session_key = ? ORDER BY id DESC LIMIT ?", + (session, limit) + ).fetchall() + messages = [] + for row in reversed(rows): + msg = { + "id": row["id"], + "session_key": row["session_key"], + "role": row["role"], + "content": row["content"], + "timestamp": row["created_at"] + } + if row["attachments"]: + msg["attachments"] = json.loads(row["attachments"]) + messages.append(msg) + return {"history": messages, "session": session} + + +@router.post("/api/jarvis/chat") +async def chat_with_jarvis(msg: JarvisMessage): + """Send a message to Jarvis via sessions_send.""" + if not OPENCLAW_ENABLED: + return {"sent": False, "error": "OpenClaw integration not enabled."} + + now = datetime.now().isoformat() + message_content = f"System: [TASKBOARD_CHAT] User says: {msg.message}\n\nRespond naturally." + + if msg.attachments: + import base64 as b64_module + import uuid + + attachments_dir = DATA_DIR / "attachments" + attachments_dir.mkdir(exist_ok=True) + + for att in msg.attachments: + att_type = att.get("type", "") + att_data = att.get("data", "") + att_filename = att.get("filename", "file") + + if att_type.startswith("image/") and att_data: + try: + if att_data.startswith("data:") and ";base64," in att_data: + header, b64_content = att_data.split(",", 1) + mime_type = header.split(":")[1].split(";")[0] + ext = mime_type.split("/")[1] if "/" in mime_type else "png" + else: + b64_content = att_data + ext = "png" + if ext not in ["png", "jpg", "jpeg", "gif", "webp"]: + ext = "png" + img_filename = f"{uuid.uuid4().hex[:8]}_{att_filename or 'image'}" + if not img_filename.endswith(f".{ext}"): + img_filename = f"{img_filename}.{ext}" + img_path = attachments_dir / img_filename + with open(img_path, "wb") as f: + f.write(b64_module.b64decode(b64_content)) + message_content += f"\n\n\U0001f4f7 **Image attached:** `/app/data/attachments/{img_filename}`\nUse the Read tool to view this image." 
+ except Exception as e: + print(f"Failed to save image attachment: {e}") + message_content += f"\n\n[Image attachment failed to save: {e}]" + elif att_data: + if att_data.startswith("data:") and ";base64," in att_data: + try: + import base64 + b64_content = att_data.split(",", 1)[1] + decoded = base64.b64decode(b64_content).decode("utf-8", errors="replace") + message_content += f"\n\n**\U0001f4ce Attached file: {att_filename}**\n```\n{decoded}\n```" + except Exception as e: + message_content += f"\n\n[Attached File: {att_filename} (decode error: {e})]" + else: + message_content += f"\n\n[Attached File: {att_filename}]" + + session_key = msg.session or "main" + + attachments_json = json.dumps(msg.attachments) if msg.attachments else None + with get_db_write() as conn: + cursor = conn.execute( + "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", + (session_key, "user", msg.message, attachments_json, now) + ) + user_msg_id = cursor.lastrowid + + logger.info(f"Chat message from user in session {session_key}") + + user_msg = { + "id": user_msg_id, "session_key": session_key, "role": "user", + "content": msg.message, "timestamp": now, "attachments": msg.attachments + } + await manager.broadcast({"type": "command_bar_message", "message": user_msg}) + + try: + async with httpx.AsyncClient(timeout=120.0) as client: + payload = { + "tool": "sessions_send", + "args": {"message": message_content, "sessionKey": session_key, "timeoutSeconds": 90} + } + headers = {"Authorization": f"Bearer {OPENCLAW_TOKEN}", "Content-Type": "application/json"} + response = await client.post(f"{OPENCLAW_GATEWAY_URL}/tools/invoke", json=payload, headers=headers) + + if response.status_code == 200: + result = response.json() + inner = result.get("result", {}) + if isinstance(inner, dict): + details = inner.get("details", {}) + assistant_reply = details.get("reply") or inner.get("reply") or inner.get("response") + else: + assistant_reply = str(inner) if inner else None + + if assistant_reply and not isinstance(assistant_reply, str): + import json as json_module + assistant_reply = json_module.dumps(assistant_reply) if isinstance(assistant_reply, (dict, list)) else str(assistant_reply) + + if assistant_reply: + with get_db_write() as conn: + cursor = conn.execute( + "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", + (session_key, "assistant", assistant_reply, None, now) + ) + assistant_msg_id = cursor.lastrowid + + jarvis_msg = { + "id": assistant_msg_id, "session_key": session_key, + "role": "assistant", "content": assistant_reply, + "timestamp": datetime.now().isoformat() + } + return {"sent": True, "response": assistant_reply, "session": session_key} + + return {"sent": True, "response": "No response received"} + else: + error_text = response.text[:200] if response.text else f"HTTP {response.status_code}" + return {"sent": False, "error": error_text} + except Exception as e: + print(f"Error sending to Jarvis: {e}") + return {"sent": False, "error": str(e)} + + +@router.post("/api/jarvis/respond") +async def jarvis_respond(msg: JarvisResponse, _: bool = Depends(verify_api_key)): + """Endpoint for Jarvis to push responses back to the command bar. 
Requires API key.""" + now = datetime.now().isoformat() + session_key = msg.session or "main" + + with get_db_write() as conn: + cursor = conn.execute( + "INSERT INTO chat_messages (session_key, role, content, attachments, created_at) VALUES (?, ?, ?, ?, ?)", + (session_key, "assistant", msg.response, None, now) + ) + msg_id = cursor.lastrowid + + logger.info(f"Jarvis response pushed to session {session_key}") + + jarvis_msg = { + "id": msg_id, "session_key": session_key, + "role": "assistant", "content": msg.response, "timestamp": now + } + await manager.broadcast({"type": "command_bar_message", "message": jarvis_msg}) + return {"delivered": True} + + +# Legacy endpoints +@router.post("/api/molt/chat") +async def chat_with_molt_legacy(msg: JarvisMessage): + """Legacy endpoint - redirects to /api/jarvis/chat.""" + return await chat_with_jarvis(msg) + + +@router.post("/api/molt/respond") +async def jarvis_respond_legacy(msg: JarvisResponse, _: bool = Depends(verify_api_key)): + """Legacy endpoint - redirects to /api/jarvis/respond.""" + return await jarvis_respond(msg, _) diff --git a/app/routes/comments.py b/app/routes/comments.py new file mode 100644 index 0000000..7748a5a --- /dev/null +++ b/app/routes/comments.py @@ -0,0 +1,165 @@ +""" +Comments: GET/POST/DELETE for task comments, with @mention spawning and agent relay. +""" + +import logging +from datetime import datetime +from fastapi import APIRouter, HTTPException + +from app.config import AGENT_TO_OPENCLAW_ID, MENTION_PATTERN +from app.database import get_db, get_db_write +from app.models import CommentCreate +from app.websocket import manager +from app.openclaw import ( + notify_OPENCLAW, send_to_agent_session, spawn_mentioned_agent, + spawn_followup_session, +) + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/api/tasks/{task_id}/comments") +def get_comments(task_id: int): + """Get comments for a task.""" + with get_db() as conn: + rows = conn.execute( + "SELECT * FROM comments WHERE task_id = ? ORDER BY created_at ASC", + (task_id,) + ).fetchall() + return [dict(row) for row in rows] + + +@router.post("/api/tasks/{task_id}/comments") +async def add_comment(task_id: int, comment: CommentCreate): + """Add a comment to a task.""" + now = datetime.now().isoformat() + task_title = "" + task_status = "" + agent_session = None + + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + + task_title = row["title"] + task_status = row["status"] + agent_session = row["agent_session_key"] if "agent_session_key" in row.keys() else None + + cursor = conn.execute( + "INSERT INTO comments (task_id, agent, content, created_at) VALUES (?, ?, ?, ?)", + (task_id, comment.agent, comment.content, now) + ) + + result = { + "id": cursor.lastrowid, + "task_id": task_id, + "agent": comment.agent, + "content": comment.content, + "created_at": now + } + + working_agent_cleared = None + if comment.agent and comment.agent != "User": + task_row = conn.execute("SELECT working_agent FROM tasks WHERE id = ?", (task_id,)).fetchone() + if task_row and task_row["working_agent"] == comment.agent: + conn.execute( + "UPDATE tasks SET working_agent = NULL, updated_at = ? 
WHERE id = ?", + (now, task_id) + ) + working_agent_cleared = comment.agent + + logger.info(f"Comment added on task #{task_id} by {comment.agent}") + await manager.broadcast({"type": "comment_added", "task_id": task_id, "comment": result}) + + if working_agent_cleared: + await manager.broadcast({"type": "work_stopped", "task_id": task_id, "agent": working_agent_cleared}) + + # Check for @mentions + mentions = MENTION_PATTERN.findall(comment.content) + if mentions: + task_description = "" + previous_context = "" + with get_db() as conn: + task_row = conn.execute("SELECT description FROM tasks WHERE id = ?", (task_id,)).fetchone() + task_description = task_row["description"] if task_row else "" + comment_rows = conn.execute( + "SELECT agent, content FROM comments WHERE task_id = ? AND id != ? ORDER BY created_at DESC LIMIT 5", + (task_id, result["id"]) + ).fetchall() + if comment_rows: + previous_context = "\n".join([f"**{r['agent']}:** {r['content'][:500]}" for r in reversed(comment_rows)]) + + for mentioned_agent in set(mentions): + matched_agent = None + for agent_name in AGENT_TO_OPENCLAW_ID.keys(): + if agent_name.lower() == mentioned_agent.lower(): + matched_agent = agent_name + break + if matched_agent and matched_agent != comment.agent: + agent_id = AGENT_TO_OPENCLAW_ID.get(matched_agent) + if agent_id: + await spawn_mentioned_agent( + task_id=task_id, + task_title=task_title, + task_description=task_description, + mentioned_agent=matched_agent, + mentioner=comment.agent, + comment_content=comment.content, + previous_context=previous_context + ) + print(f"\U0001f4e2 Spawned {matched_agent} for mention in task #{task_id}") + + # If from User, relay to agent session or spawn followup + if comment.agent == "User" and not mentions: + assigned_agent = None + with get_db() as conn: + row = conn.execute("SELECT agent FROM tasks WHERE id = ?", (task_id,)).fetchone() + assigned_agent = row["agent"] if row else None + + if assigned_agent and assigned_agent in AGENT_TO_OPENCLAW_ID and assigned_agent != "User": + sent = False + if agent_session: + message = f"💬 **User replied on Task #{task_id}:**\n\n{comment.content}\n\n---\nRespond by posting a comment to the task." + sent = await send_to_agent_session(agent_session, message) + if sent: + print(f"📨 Relayed user comment to session {agent_session} for task #{task_id}") + + if not sent: + # Session dead or missing — spawn a followup session + print(f"🔄 Spawning followup session for task #{task_id} (assigned: {assigned_agent})") + previous_comments = [] + with get_db() as conn: + rows = conn.execute( + "SELECT agent, content FROM comments WHERE task_id = ? ORDER BY created_at DESC LIMIT 5", + (task_id,) + ).fetchall() + previous_comments = [f"**{r['agent']}:** {r['content'][:500]}" for r in reversed(rows)] + context = "\n".join(previous_comments[:-1]) + await spawn_followup_session(task_id, task_title, assigned_agent, context, comment.content) + elif comment.agent not in ["System", "User"] + list(AGENT_TO_OPENCLAW_ID.keys()): + await notify_OPENCLAW(task_id, task_title, comment.agent, comment.content) + + return result + + +@router.delete("/api/tasks/{task_id}/comments/{comment_id}") +async def delete_comment(task_id: int, comment_id: int): + """Delete a comment from a task.""" + with get_db_write() as conn: + row = conn.execute( + "SELECT id FROM comments WHERE id = ? 
AND task_id = ?", + (comment_id, task_id) + ).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Comment not found") + conn.execute("DELETE FROM comments WHERE id = ?", (comment_id,)) + + logger.info(f"Comment #{comment_id} deleted from task #{task_id}") + await manager.broadcast({ + "type": "comment_deleted", + "task_id": task_id, + "comment_id": comment_id + }) + return {"status": "deleted", "comment_id": comment_id} diff --git a/app/routes/projects.py b/app/routes/projects.py new file mode 100644 index 0000000..29f5d21 --- /dev/null +++ b/app/routes/projects.py @@ -0,0 +1,72 @@ +""" +Project CRUD endpoints. +""" + +import logging +import re +from typing import List +from datetime import datetime, timezone +from fastapi import APIRouter, HTTPException + +from app.database import get_db, get_db_write +from app.models import ProjectCreate, ProjectResponse +from app.websocket import manager + +logger = logging.getLogger(__name__) +router = APIRouter() + + +def _slugify(name: str) -> str: + """Generate a URL-safe slug from a project name.""" + slug = name.lower().strip() + slug = re.sub(r"[^a-z0-9\s-]", "", slug) + slug = re.sub(r"[\s-]+", "-", slug) + return slug.strip("-") + + +@router.get("/api/projects", response_model=List[ProjectResponse]) +def list_projects(): + """Return all projects ordered by id (Default first).""" + with get_db() as conn: + rows = conn.execute("SELECT * FROM projects ORDER BY id").fetchall() + return [dict(row) for row in rows] + + +@router.post("/api/projects", response_model=ProjectResponse) +async def create_project(project: ProjectCreate): + """Create a new project.""" + slug = _slugify(project.name) + if not slug: + raise HTTPException(status_code=400, detail="Project name produces an empty slug") + now = datetime.now(timezone.utc).isoformat() + with get_db_write() as conn: + # Check for duplicate slug + existing = conn.execute("SELECT id FROM projects WHERE slug = ?", (slug,)).fetchone() + if existing: + raise HTTPException(status_code=409, detail=f"Project with slug '{slug}' already exists") + cursor = conn.execute( + "INSERT INTO projects (name, slug, description, color, created_at) VALUES (?, ?, ?, ?, ?)", + (project.name, slug, project.description, project.color, now) + ) + row = conn.execute("SELECT * FROM projects WHERE id = ?", (cursor.lastrowid,)).fetchone() + result = dict(row) + logger.info(f"Project created: {project.name} (slug={slug})") + await manager.broadcast({"type": "project_created", "project": result}) + return result + + +@router.delete("/api/projects/{project_id}") +async def delete_project(project_id: int): + """Delete a project. Cannot delete Default (id=1). 
Reassigns tasks to Default.""" + if project_id == 1: + raise HTTPException(status_code=400, detail="Cannot delete the Default project") + with get_db_write() as conn: + row = conn.execute("SELECT * FROM projects WHERE id = ?", (project_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Project not found") + # Reassign tasks to Default project + conn.execute("UPDATE tasks SET project_id = 1 WHERE project_id = ?", (project_id,)) + conn.execute("DELETE FROM projects WHERE id = ?", (project_id,)) + logger.info(f"Project #{project_id} deleted: {row['name']}") + await manager.broadcast({"type": "project_deleted", "project_id": project_id}) + return {"status": "deleted", "id": project_id} diff --git a/app/routes/sessions.py b/app/routes/sessions.py new file mode 100644 index 0000000..af7b1a8 --- /dev/null +++ b/app/routes/sessions.py @@ -0,0 +1,187 @@ +""" +Sessions: list, create, stop, stop-all, delete OpenClaw sessions. +""" + +import json +import logging +from datetime import datetime +from fastapi import APIRouter + +from app.config import OPENCLAW_ENABLED, OPENCLAW_GATEWAY_URL, OPENCLAW_TOKEN +from app.database import get_db_write +from app.models import SessionCreate +from app.websocket import manager + +import httpx + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/api/sessions") +async def list_sessions(): + """Proxy to OpenClaw sessions_list to get active sessions.""" + if not OPENCLAW_ENABLED: + return {"sessions": [], "error": "OpenClaw integration not enabled"} + + try: + async with httpx.AsyncClient(timeout=10.0) as client: + payload = {"tool": "sessions_list", "args": {"limit": 20, "messageLimit": 0}} + headers = {"Authorization": f"Bearer {OPENCLAW_TOKEN}", "Content-Type": "application/json"} + response = await client.post(f"{OPENCLAW_GATEWAY_URL}/tools/invoke", json=payload, headers=headers) + + if response.status_code == 200: + result = response.json() + if result.get("ok"): + inner_result = result.get("result", {}) + content = inner_result.get("content", []) + if content and len(content) > 0: + text_content = content[0].get("text", "{}") + sessions_data = json.loads(text_content) + else: + sessions_data = inner_result + sessions = sessions_data.get("sessions", []) + + formatted = [] + for s in sessions: + key = s.get("key", "") + session_label = s.get("label", "") + display = s.get("displayName", key) + if key == "main" or key == "agent:main:main": + label = "\U0001f6e1\ufe0f Jarvis (Main)" + elif session_label: + label = f"\U0001f916 {session_label}" + elif "subagent" in key: + short_id = key.split(":")[-1][:8] if ":" in key else key[:8] + label = f"\U0001f916 Session {short_id}" + elif key.startswith("agent:"): + parts = key.split(":") + agent_name = parts[1] if len(parts) > 1 else key + label = f"\U0001f916 {agent_name.title()}" + else: + label = display + formatted.append({ + "key": key, "label": label, "channel": s.get("channel", ""), + "model": s.get("model", ""), "updatedAt": s.get("updatedAt", 0) + }) + + formatted.sort(key=lambda x: (0 if "main" in x["key"].lower() else 1, -x.get("updatedAt", 0))) + return {"sessions": formatted} + + return {"sessions": [], "error": f"Failed to fetch sessions: {response.status_code}"} + except Exception as e: + print(f"Error fetching sessions: {e}") + return {"sessions": [], "error": str(e)} + + +@router.post("/api/sessions/create") +async def create_session(req: SessionCreate): + """Create a new OpenClaw session via sessions_spawn.""" + if not OPENCLAW_ENABLED: + return 
{"success": False, "error": "OpenClaw integration not enabled"} + try: + async with httpx.AsyncClient(timeout=30.0) as client: + payload = { + "tool": "sessions_spawn", + "args": { + "agentId": req.agentId, + "task": req.task, + "label": req.label or f"taskboard-{datetime.now().strftime('%H%M%S')}", + "cleanup": "keep" + } + } + headers = {"Authorization": f"Bearer {OPENCLAW_TOKEN}", "Content-Type": "application/json"} + response = await client.post(f"{OPENCLAW_GATEWAY_URL}/tools/invoke", json=payload, headers=headers) + if response.status_code == 200: + result = response.json() + if result.get("ok"): + return {"success": True, "result": result.get("result", {})} + return {"success": False, "error": f"Failed: {response.status_code}"} + except Exception as e: + print(f"Error creating session: {e}") + return {"success": False, "error": str(e)} + + +@router.post("/api/sessions/{session_key}/stop") +async def stop_session(session_key: str): + """Stop/abort a running session.""" + if not OPENCLAW_ENABLED: + return {"success": False, "error": "OpenClaw integration not enabled"} + from app.openclaw import stop_agent_session, set_task_session + try: + success = await stop_agent_session(session_key) + # Clear session key from any task that references it + with get_db_write() as conn: + conn.execute( + "UPDATE tasks SET agent_session_key = NULL WHERE agent_session_key = ?", + (session_key,) + ) + print(f"🧹 Cleared DB session key {session_key[:30]}...") + if success: + return {"success": True, "message": f"Stopped session: {session_key}"} + else: + return {"success": False, "error": f"Failed to stop session: {session_key}"} + except Exception as e: + print(f"Error stopping session: {e}") + return {"success": False, "error": str(e)} + + +@router.post("/api/sessions/stop-all") +async def stop_all_sessions(): + """Emergency stop all non-main sessions.""" + if not OPENCLAW_ENABLED: + return {"success": False, "error": "OpenClaw integration not enabled"} + stopped = [] + errors = [] + try: + async with httpx.AsyncClient(timeout=10.0) as client: + payload = {"tool": "sessions_list", "args": {"limit": 50, "messageLimit": 0}} + headers = {"Authorization": f"Bearer {OPENCLAW_TOKEN}", "Content-Type": "application/json"} + response = await client.post(f"{OPENCLAW_GATEWAY_URL}/tools/invoke", json=payload, headers=headers) + if response.status_code == 200: + result = response.json() + if result.get("ok"): + inner_result = result.get("result", {}) + content = inner_result.get("content", []) + if content and len(content) > 0: + text_content = content[0].get("text", "{}") + sessions_data = json.loads(text_content) + else: + sessions_data = inner_result + sessions = sessions_data.get("sessions", []) + for s in sessions: + key = s.get("key", "") + if key and "main" not in key.lower(): + try: + stop_result = await stop_session(key) + if stop_result.get("success"): + stopped.append(key) + else: + errors.append(key) + except: + errors.append(key) + return {"success": True, "stopped": stopped, "errors": errors, "message": f"Stopped {len(stopped)} sessions"} + except Exception as e: + print(f"Error stopping all sessions: {e}") + return {"success": False, "error": str(e)} + + +@router.delete("/api/sessions/{session_key}") +async def delete_session(session_key: str): + """Delete a session via OpenClaw WebSocket RPC.""" + if not OPENCLAW_ENABLED: + return {"success": False, "error": "OpenClaw integration not enabled"} + + from app.openclaw import stop_agent_session + + # stop_agent_session uses _ws_rpc("sessions.delete") 
which fully removes the session + success = await stop_agent_session(session_key) + + # Clear local references + with get_db_write() as conn: + conn.execute("DELETE FROM chat_messages WHERE session_key = ?", (session_key,)) + conn.execute("UPDATE tasks SET agent_session_key = NULL WHERE agent_session_key = ?", (session_key,)) + + logger.info(f"Session {session_key} deleted (openclaw={'ok' if success else 'failed'})") + await manager.broadcast({"type": "session_deleted", "session_key": session_key}) + return {"success": True, "message": f"Deleted session: {session_key}"} diff --git a/app/routes/tasks.py b/app/routes/tasks.py new file mode 100644 index 0000000..00c94c8 --- /dev/null +++ b/app/routes/tasks.py @@ -0,0 +1,442 @@ +""" +Task CRUD, move, start-work, stop-work, agent tasks, config, activity. +""" + +import logging +from typing import List +from datetime import datetime +from fastapi import APIRouter, HTTPException + +from app.config import ( + AGENTS, AGENT_META, AGENT_TO_OPENCLAW_ID, + STATUSES, PRIORITIES, + MAIN_AGENT_NAME, MAIN_AGENT_EMOJI, HUMAN_NAME, HUMAN_SUPERVISOR_LABEL, BOARD_TITLE, + AUTO_STOP_ON_DONE, +) +from app.database import get_db, get_db_write, log_activity +from app.models import TaskCreate, TaskUpdate, Task +from app.websocket import manager +from app.openclaw import ( + spawn_agent_session, send_to_agent_session, + get_task_session, set_task_session, + is_session_alive, stop_agent_session, +) + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/api/config") +def get_config(): + """Get board configuration including branding.""" + with get_db() as conn: + projects = [dict(row) for row in conn.execute("SELECT * FROM projects ORDER BY id").fetchall()] + return { + "agents": AGENTS, + "agentMeta": AGENT_META, + "statuses": STATUSES, + "priorities": PRIORITIES, + "projects": projects, + "branding": { + "mainAgentName": MAIN_AGENT_NAME, + "mainAgentEmoji": MAIN_AGENT_EMOJI, + "humanName": HUMAN_NAME, + "humanSupervisorLabel": HUMAN_SUPERVISOR_LABEL, + "boardTitle": BOARD_TITLE, + } + } + + +@router.get("/api/tasks", response_model=List[Task]) +def list_tasks(board: str = "tasks", agent: str = None, status: str = None, project_id: int = None): + """List all tasks with optional filters.""" + with get_db() as conn: + query = "SELECT * FROM tasks WHERE board = ?" + params = [board] + if agent: + query += " AND agent = ?" + params.append(agent) + if status: + query += " AND status = ?" + params.append(status) + if project_id is not None: + query += " AND project_id = ?" 
+ params.append(project_id) + query += " ORDER BY CASE priority WHEN 'Critical' THEN 1 WHEN 'High' THEN 2 WHEN 'Medium' THEN 3 ELSE 4 END, created_at DESC" + rows = conn.execute(query, params).fetchall() + return [dict(row) for row in rows] + + +@router.get("/api/tasks/{task_id}", response_model=Task) +def get_task(task_id: int): + """Get a single task.""" + with get_db() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + return dict(row) + + +@router.post("/api/tasks", response_model=Task) +async def create_task(task: TaskCreate): + """Create a new task.""" + print(f"\U0001f4dd CREATE-TASK: {task.title} | Status: {task.status} | Agent: {task.agent} | Priority: {task.priority}") + now = datetime.now().isoformat() + try: + with get_db_write() as conn: + print(f"\U0001f4be CREATE-TASK: Inserting into database") + cursor = conn.execute( + """INSERT INTO tasks (title, description, status, priority, agent, due_date, created_at, updated_at, board, source_file, source_ref, project_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + (task.title, task.description, task.status, task.priority, task.agent, task.due_date, now, now, task.board, task.source_file, task.source_ref, task.project_id) + ) + task_id = cursor.lastrowid + print(f"\u2705 CREATE-TASK: Database insert successful - Task ID: {task_id}") + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + logger.info(f"Task #{task_id} created by {task.agent}: {task.title}") + log_activity(task_id, "created", task.agent, f"Created: {task.title}") + except Exception as e: + print(f"\u274c CREATE-TASK: Database error - {type(e).__name__}: {e}") + import traceback + print(f"\u274c CREATE-TASK: Traceback - {traceback.format_exc()}") + raise HTTPException(status_code=500, detail=f"Failed to create task: {str(e)}") + + print(f"\U0001f4e1 CREATE-TASK: Broadcasting task_created event") + await manager.broadcast({"type": "task_created", "task": result}) + print(f"\u2705 CREATE-TASK COMPLETE: Task #{task_id}") + return result + + +@router.patch("/api/tasks/{task_id}", response_model=Task) +async def update_task(task_id: int, updates: TaskUpdate): + """Update a task.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + current = dict(row) + changes = [] + update_fields = [] + params = [] + for field in ["title", "description", "status", "priority", "agent", "due_date", "source_file", "source_ref", "project_id"]: + new_value = getattr(updates, field) + if new_value is not None and new_value != current[field]: + update_fields.append(f"{field} = ?") + params.append(new_value) + changes.append(f"{field}: {current[field]} \u2192 {new_value}") + + # If moving to Done, also clear working_agent/agent_session_key in same transaction + moving_to_done = updates.status == "Done" and current.get("status") != "Done" + if moving_to_done: + if "working_agent = ?" not in update_fields: + update_fields.append("working_agent = ?") + params.append(None) + if "agent_session_key = ?" 
not in update_fields: + update_fields.append("agent_session_key = ?") + params.append(None) + + if update_fields: + update_fields.append("updated_at = ?") + params.append(datetime.now().isoformat()) + params.append(task_id) + conn.execute(f"UPDATE tasks SET {', '.join(update_fields)} WHERE id = ?", params) + logger.info(f"Task #{task_id} updated by {updates.agent or current['agent']}: {'; '.join(changes)}") + + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + + if changes: + log_activity(task_id, "updated", updates.agent or current["agent"], "; ".join(changes)) + + # Auto-stop agent when task is moved to Done via PATCH + if moving_to_done and AUTO_STOP_ON_DONE: + # Use session key from pre-write data (already NULL in DB after write) + session_key = current.get("agent_session_key") + if session_key: + print(f"🛑 Auto-stopping agent session {session_key} for task #{task_id} (PATCH to Done)") + await stop_agent_session(session_key) + set_task_session(task_id, None) + logger.info(f"Task #{task_id} moved to Done — cleared agent session {session_key}") + await manager.broadcast({"type": "work_stopped", "task_id": task_id, "agent": current.get("working_agent")}) + print(f"🧹 Cleared agent session for task #{task_id} (PATCH)") + # Refresh result after clearing + with get_db() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + + await manager.broadcast({"type": "task_updated", "task": result}) + return result + + +@router.delete("/api/tasks/{task_id}") +async def delete_task(task_id: int): + """Delete a task.""" + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + conn.execute("DELETE FROM tasks WHERE id = ?", (task_id,)) + logger.info(f"Task #{task_id} deleted: {row['title']}") + log_activity(task_id, "deleted", None, f"Deleted: {row['title']}") + await manager.broadcast({"type": "task_deleted", "task_id": task_id}) + return {"status": "deleted", "id": task_id} + + +@router.get("/api/agents/{agent}/tasks") +def get_agent_tasks(agent: str): + """Get all tasks assigned to an agent.""" + with get_db() as conn: + rows = conn.execute( + "SELECT * FROM tasks WHERE agent = ? AND status NOT IN ('Done', 'Blocked') ORDER BY priority, created_at", + (agent,) + ).fetchall() + return [dict(row) for row in rows] + + +@router.post("/api/tasks/{task_id}/start-work") +async def start_work(task_id: int, agent: str): + """Mark that an agent is actively working on a task. Auto-moves to In Progress.""" + print(f"\U0001f916 START-WORK: Task #{task_id} | Agent: {agent}") + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + print(f"\u274c START-WORK FAILED: Task #{task_id} not found") + raise HTTPException(status_code=404, detail="Task not found") + task = dict(row) + current_status = task["status"] + current_working = task.get("working_agent") + now = datetime.now().isoformat() + print(f"\U0001f4cb START-WORK: Current status: {current_status} | Current working_agent: {current_working}") + moved = False + if current_status in ["Backlog", "Blocked"]: + print(f"\U0001f504 START-WORK: Auto-moving from {current_status} \u2192 In Progress") + conn.execute( + "UPDATE tasks SET working_agent = ?, status = ?, updated_at = ? 
WHERE id = ?", + (agent, "In Progress", now, task_id) + ) + moved = True + logger.info(f"Task #{task_id} status change: {current_status} -> In Progress by {agent} (start-work)") + else: + print(f"\U0001f4be START-WORK: Updating working_agent={agent} (status unchanged: {current_status})") + conn.execute( + "UPDATE tasks SET working_agent = ?, updated_at = ? WHERE id = ?", + (agent, now, task_id) + ) + print(f"\u2705 START-WORK: Database updated successfully") + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + + if moved: + log_activity(task_id, "status_change", agent, f"Auto-moved from {current_status} to In Progress (agent started work)") + + print(f"\U0001f4e1 START-WORK: Broadcasting work_started event") + await manager.broadcast({"type": "work_started", "task_id": task_id, "agent": agent}) + if moved: + print(f"\U0001f4e1 START-WORK: Broadcasting task_updated event (status changed)") + await manager.broadcast({"type": "task_updated", "task": result}) + + # Spawn agent if no session exists yet (Play button or manual start-work). + # If agent was already spawned (e.g. via /move), existing session prevents double-spawn. + if agent in AGENT_TO_OPENCLAW_ID and agent != "User": + existing_session = get_task_session(task_id) + if not existing_session: + print(f"🚀 START-WORK: Spawning {agent} for task #{task_id}") + await spawn_agent_session(task_id, result["title"], result.get("description", ""), agent) + else: + print(f"⏩ START-WORK: Session already exists for task #{task_id}: {existing_session}") + + print(f"\u2705 START-WORK COMPLETE: Task #{task_id} | Agent: {agent} | Moved: {moved}") + return {"status": "working", "task_id": task_id, "agent": agent, "moved_to": "In Progress" if moved else None} + + +@router.post("/api/tasks/{task_id}/stop-work") +async def stop_work(task_id: int, agent: str = None, outcome: str = None, reason: str = None): + """Mark that an agent has stopped working on a task.""" + print(f"\U0001f6d1 STOP-WORK: Task #{task_id} | Agent: {agent} | Outcome: {outcome} | Reason: {reason}") + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + print(f"\u274c STOP-WORK FAILED: Task #{task_id} not found") + raise HTTPException(status_code=404, detail="Task not found") + task = dict(row) + now = datetime.now().isoformat() + current_status = task["status"] + current_working = task.get("working_agent") + new_status = None + action_item = None + print(f"\U0001f4cb STOP-WORK: Current status: {current_status} | Current working_agent: {current_working}") + + if outcome == "review" and current_status not in ("Review", "Done"): + new_status = "Review" + print(f"\U0001f504 STOP-WORK: Auto-moving to Review (outcome=review)") + reason_text = reason or "Work completed, ready for review" + cursor = conn.execute( + "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", + (task_id, agent or "Agent", reason_text, "completion", now) + ) + action_item = {"id": cursor.lastrowid, "task_id": task_id, "agent": agent or "Agent", + "content": reason_text, "item_type": "completion", "resolved": False, "created_at": now} + print(f"\U0001f4dd STOP-WORK: Created completion action item #{cursor.lastrowid}") + elif outcome == "blocked" and current_status == "In Progress": + new_status = "Blocked" + print(f"\U0001f504 STOP-WORK: Auto-moving to Blocked (outcome=blocked)") + reason_text = reason or "Blocked - awaiting input" + cursor = 
conn.execute( + "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", + (task_id, agent or "Agent", reason_text, "blocker", now) + ) + action_item = {"id": cursor.lastrowid, "task_id": task_id, "agent": agent or "Agent", + "content": reason_text, "item_type": "blocker", "resolved": False, "created_at": now} + print(f"\U0001f4dd STOP-WORK: Created blocker action item #{cursor.lastrowid}") + else: + print(f"\U0001f4be STOP-WORK: Clearing working_agent (no status change)") + + # Keep agent_session_key alive — session stays until Done or explicit UI stop. + # Only clear working_agent (agent is no longer actively working, but session remains for follow-ups). + if new_status: + print(f"💾 STOP-WORK: Updating DB - working_agent=NULL, status={new_status} (session key preserved)") + conn.execute( + "UPDATE tasks SET working_agent = NULL, status = ?, updated_at = ? WHERE id = ?", + (new_status, now, task_id) + ) + logger.info(f"Task #{task_id} status change: {current_status} -> {new_status} by {agent or 'Agent'} (stop-work)") + else: + print(f"💾 STOP-WORK: Updating DB - working_agent=NULL (status unchanged, session key preserved)") + conn.execute( + "UPDATE tasks SET working_agent = NULL, updated_at = ? WHERE id = ?", + (now, task_id) + ) + print(f"\u2705 STOP-WORK: Database updated successfully") + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + + if new_status: + log_activity(task_id, "status_change", agent or "Agent", f"Auto-moved to {new_status} (agent stopped work)") + + print(f"\U0001f4e1 STOP-WORK: Broadcasting work_stopped event") + await manager.broadcast({"type": "work_stopped", "task_id": task_id, "agent": agent or current_working}) + if new_status: + print(f"\U0001f4e1 STOP-WORK: Broadcasting task_updated event (status: {new_status})") + await manager.broadcast({"type": "task_updated", "task": result}) + if action_item: + print(f"\U0001f4e1 STOP-WORK: Broadcasting action_item_added event") + await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": action_item}) + + print(f"\u2705 STOP-WORK COMPLETE: Task #{task_id} | New status: {new_status or current_status}") + return {"status": "stopped", "task_id": task_id, "moved_to": new_status} + + +@router.post("/api/tasks/{task_id}/move") +async def move_task(task_id: int, status: str = None, agent: str = None, reason: str = None): + """Quick move task to a new status with workflow rules.""" + print(f"\U0001f4cb MOVE-TASK: Task #{task_id} \u2192 {status} | Agent: {agent} | Reason: {reason}") + now = datetime.now().isoformat() + with get_db_write() as conn: + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + print(f"\u274c MOVE-TASK FAILED: Task #{task_id} not found") + raise HTTPException(status_code=404, detail="Task not found") + task = dict(row) + old_status = task["status"] + old_working = task.get("working_agent") + print(f"\U0001f4cb MOVE-TASK: Current state - Status: {old_status} | Working: {old_working} | Assigned: {task.get('agent')}") + + if status == "Done" and agent != "User": + print(f"\u274c MOVE-TASK BLOCKED: Only User can move to Done (agent={agent})") + raise HTTPException(status_code=403, detail="Only User can move tasks to Done") + + # If moving to Done, clear working_agent in same transaction + if status == "Done": + print(f"\U0001f4be MOVE-TASK: Updating status {old_status} \u2192 {status} and clearing working_agent") + conn.execute("UPDATE tasks SET status = 
?, working_agent = NULL, agent_session_key = NULL, updated_at = ? WHERE id = ?", (status, now, task_id)) + else: + print(f"\U0001f4be MOVE-TASK: Updating status {old_status} \u2192 {status}") + conn.execute("UPDATE tasks SET status = ?, updated_at = ? WHERE id = ?", (status, now, task_id)) + print(f"\u2705 MOVE-TASK: Database updated successfully") + + logger.info(f"Task #{task_id} moved: {old_status} -> {status} by {agent}") + + action_item = None + if status == "Review" and old_status != "Review": + content = reason or f"Ready for review: {task['title']}" + cursor = conn.execute( + "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", + (task_id, agent or task["agent"], content, "completion", now) + ) + action_item = { + "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], + "content": content, "item_type": "completion", "resolved": 0, "created_at": now + } + if status == "Blocked" and old_status != "Blocked": + content = reason or f"Blocked: {task['title']} - reason not specified" + cursor = conn.execute( + "INSERT INTO action_items (task_id, agent, content, item_type, created_at) VALUES (?, ?, ?, ?, ?)", + (task_id, agent or task["agent"], content, "blocker", now) + ) + action_item = { + "id": cursor.lastrowid, "task_id": task_id, "agent": agent or task["agent"], + "content": content, "item_type": "blocker", "resolved": 0, "created_at": now + } + + row = conn.execute("SELECT * FROM tasks WHERE id = ?", (task_id,)).fetchone() + result = dict(row) + + log_activity(task_id, "moved", agent, f"Moved to {status}") + + await manager.broadcast({"type": "task_updated", "task": result}) + if action_item: + await manager.broadcast({"type": "action_item_added", "task_id": task_id, "item": action_item}) + + if status == "In Progress" and old_status != "In Progress": + assigned_agent = result.get("agent", "Unassigned") + if assigned_agent in AGENT_TO_OPENCLAW_ID and assigned_agent != "User": + print(f"🚀 MOVE-TASK: Task #{task_id} moved to In Progress — auto-spawning {assigned_agent}") + await spawn_agent_session(task_id, result.get("title", ""), result.get("description", ""), assigned_agent) + + session_cleared = False + if status == "Done": + await manager.broadcast({"type": "work_stopped", "task_id": task_id, "agent": old_working}) + if AUTO_STOP_ON_DONE: + # Use session key from pre-write task data (already NULL in DB after write) + session_key = task.get("agent_session_key") + if session_key: + print(f"🛑 Auto-stopping agent session {session_key} for task #{task_id} (moved to Done)") + await stop_agent_session(session_key) + set_task_session(task_id, None) + session_cleared = True + logger.info(f"Task #{task_id} moved to Done — cleared agent session {session_key}") + print(f"🧹 Cleared agent session for task #{task_id}") + + return {"status": "moved", "new_status": status, "action_item_created": action_item is not None, "session_cleared": session_cleared} + + +@router.get("/api/tasks/{task_id}/agent-status") +async def get_agent_status(task_id: int): + """Check if the agent session for a task is still alive.""" + with get_db() as conn: + row = conn.execute("SELECT agent_session_key, working_agent FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not row: + raise HTTPException(status_code=404, detail="Task not found") + + session_key = row["agent_session_key"] + working_agent = row["working_agent"] + + if not session_key: + return {"alive": False, "session_key": None, "working_agent": working_agent} + + alive = await 
is_session_alive(session_key) + # Don't clear session key here — it's needed for followup spawning. + # Session cleanup only happens on Done or explicit Stop. + return {"alive": alive, "session_key": session_key, "working_agent": working_agent} + + +@router.get("/api/activity") +def get_activity(limit: int = 50): + """Get recent activity.""" + with get_db() as conn: + rows = conn.execute( + "SELECT * FROM activity_log ORDER BY timestamp DESC LIMIT ?", + (limit,) + ).fetchall() + return [dict(row) for row in rows] diff --git a/app/routes/uploads.py b/app/routes/uploads.py new file mode 100644 index 0000000..319f156 --- /dev/null +++ b/app/routes/uploads.py @@ -0,0 +1,45 @@ +""" +File uploads: image upload endpoint. +""" + +import base64 as b64_module +import uuid + +from fastapi import APIRouter, HTTPException + +from app.config import DATA_DIR +from app.models import ImageUpload + +router = APIRouter() + + +@router.post("/api/upload/image") +async def upload_image(image: ImageUpload): + """Upload a base64 image and return the file path.""" + attachments_dir = DATA_DIR / "attachments" + attachments_dir.mkdir(exist_ok=True) + + try: + data = image.data + if data.startswith("data:") and ";base64," in data: + header, b64_content = data.split(",", 1) + mime_type = header.split(":")[1].split(";")[0] + ext = mime_type.split("/")[1] if "/" in mime_type else "png" + else: + b64_content = data + ext = "png" + + if ext not in ["png", "jpg", "jpeg", "gif", "webp"]: + ext = "png" + + img_filename = f"{uuid.uuid4().hex[:8]}_{image.filename or 'image'}" + if not img_filename.endswith(f".{ext}"): + img_filename = f"{img_filename}.{ext}" + + img_path = attachments_dir / img_filename + with open(img_path, "wb") as f: + f.write(b64_module.b64decode(b64_content)) + + return {"path": f"/app/data/attachments/{img_filename}", "filename": img_filename} + except Exception as e: + raise HTTPException(status_code=400, detail=f"Failed to save image: {e}") diff --git a/app/websocket.py b/app/websocket.py new file mode 100644 index 0000000..2c8b8ba --- /dev/null +++ b/app/websocket.py @@ -0,0 +1,33 @@ +""" +WebSocket connection manager and broadcast helper. +""" + +from typing import Set +from fastapi import WebSocket + + +class ConnectionManager: + """Manage WebSocket connections for live updates.""" + + def __init__(self): + self.active_connections: Set[WebSocket] = set() + + async def connect(self, websocket: WebSocket): + await websocket.accept() + self.active_connections.add(websocket) + + def disconnect(self, websocket: WebSocket): + self.active_connections.discard(websocket) + + async def broadcast(self, message: dict): + """Send update to all connected clients.""" + dead = set() + for connection in self.active_connections: + try: + await connection.send_json(message) + except: + dead.add(connection) + self.active_connections -= dead + + +manager = ConnectionManager() diff --git a/docker-compose.yml b/docker-compose.yml index 6beaf1d..1a5f036 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ services: taskboard: build: . 
- container_name: moltdev-taskboard + container_name: openclaw-taskboard restart: unless-stopped ports: - "8080:8080" diff --git a/docs/plans/2026-02-13-refactor-and-features-design.md b/docs/plans/2026-02-13-refactor-and-features-design.md new file mode 100644 index 0000000..295e7f0 --- /dev/null +++ b/docs/plans/2026-02-13-refactor-and-features-design.md @@ -0,0 +1,139 @@ +# OpenDevBoard Refactor & Feature Expansion — Design Document + +**Date:** 2026-02-13 +**Status:** Approved + +--- + +## 1. Backend Architecture + +### Package Structure + +``` +app/ +├── __init__.py # create_app() factory +├── main.py # Uvicorn entry, middleware registration +├── config.py # Env-based settings +├── database.py # get_db(), init_db(), schema migrations +├── models.py # Pydantic request/response schemas +├── websocket.py # ConnectionManager, broadcast helpers +├── openclaw.py # Agent spawn/send/stop helpers +└── routes/ + ├── __init__.py # Collects all APIRouters + ├── tasks.py # /api/tasks CRUD, /move, /start-work, /stop-work + ├── projects.py # /api/projects CRUD + ├── comments.py # /api/tasks/{id}/comments + ├── action_items.py # /api/tasks/{id}/action-items + resolve/archive + ├── sessions.py # /api/sessions CRUD, stop, stop-all + ├── chat.py # /api/jarvis/chat, /history, /respond + └── uploads.py # /api/upload/image +``` + +### Database Changes + +New `projects` table: + +```sql +CREATE TABLE IF NOT EXISTS projects ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + slug TEXT UNIQUE NOT NULL, + description TEXT DEFAULT '', + color TEXT DEFAULT '#00b4d8', + created_at TEXT NOT NULL +); +``` + +Default project auto-inserted on first migration. + +`tasks` table addition: + +```sql +ALTER TABLE tasks ADD COLUMN project_id INTEGER DEFAULT 1 REFERENCES projects(id); +``` + +All existing tasks assigned to Default project (id=1). + +### Status Validation + +Valid statuses: `["Backlog", "Todo", "In Progress", "Review", "Done", "Blocked"]` + +Backend rejects any task create/update with invalid status (HTTP 422). + +### Docker Updates + +- Dockerfile: `COPY app/ app/` replaces `COPY app.py .` +- CMD: `python -m uvicorn app.main:app --host 0.0.0.0 --port 8080 --log-level info --no-access-log` + +--- + +## 2. Frontend Changes + +### Project Switcher + +- Dropdown in header, next to board title +- Options: "All Projects" (default), then each project by name +- Selecting filters tasks client-side +- "Manage Projects" link opens add/remove modal + +### Project Badges + +- In "All Projects" view: colored pill badge on each card with project name +- Badge color from `project.color` +- Hidden when viewing single project + +### New "Todo" Column + +- Column order: Backlog → Todo → In Progress → Review → Done → Blocked (6 columns) +- Full drag-and-drop support + +### Responsive Design + +- Breakpoints: 768px (tablet), 480px (mobile) +- Tablet: columns wrap 2-3 per row +- Mobile: columns stack vertically, full-width. Modals go full-screen +- Command bar full-width on mobile + +### Filter Bar + +- Below header, above board +- Dropdowns: Priority, Agent, Project (in "All" view) +- Text search: title/description keyword +- AND logic, client-side filtering + +### Markdown Export + +- "Export MD" button in task modal header +- Content: title, metadata table (priority, status, agent, due date, project), description, action items, all comments chronologically +- Downloads as `task-{id}-{title-slug}.md` + +--- + +## 3. 
Agent Hardening + +### Guard Against Double-Spawn + +- Before spawning, check if `task.agent_session_key` is set AND session is active via OpenClaw `sessions_list` +- Active session → skip spawn +- Dead session → clear `agent_session_key`, spawn new + +### Session Liveness Check on Card Open + +- When opening task modal, if `agent_session_key` exists, background-check session liveness +- Update working indicator (clear if dead) +- New endpoint: `GET /api/tasks/{task_id}/agent-status` → `{ alive: bool, session_key: str }` + +### Agent Lifecycle on Status Change + +- Moving to "In Progress" → auto-spawn agent (if assigned and no active session) +- Moving to "Done" → auto-stop agent session (if one exists) +- All other transitions → agent stays alive + +--- + +## 4. Constraints + +- Tech stack: FastAPI, SQLite, Vanilla JS/CSS (no frameworks) +- Preserve: WebSocket live updates, AI-Agent integration, security middleware (IP restriction, API keys) +- Maintain dark/cyberpunk aesthetic +- After every major task, pause for user review and commit diff --git a/docs/plans/2026-02-13-refactor-implementation-plan.md b/docs/plans/2026-02-13-refactor-implementation-plan.md new file mode 100644 index 0000000..47f0f60 --- /dev/null +++ b/docs/plans/2026-02-13-refactor-implementation-plan.md @@ -0,0 +1,902 @@ +# OpenDevBoard Refactor & Feature Expansion — Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Refactor monolithic app.py into a clean FastAPI package, add multi-project support, Todo column, advanced filtering, responsive design, markdown export, and harden agent lifecycle. + +**Architecture:** Split app.py into `app/` package with APIRouters per domain. Add `projects` table linked to tasks. All frontend changes in single `static/index.html`. Client-side filtering, server-side validation. + +**Tech Stack:** FastAPI, SQLite, Pydantic, Vanilla JS/CSS, WebSockets, OpenClaw API + +**User Workflow:** After each major task (marked with CHECKPOINT), pause for user review and commit. + +--- + +## Phase 1: Backend Refactoring — Split app.py into Package + +### Task 1: Create package skeleton and config module + +**Files:** +- Create: `app/__init__.py` +- Create: `app/config.py` + +**Step 1: Create `app/__init__.py`** + +Empty file to make `app/` a Python package: + +```python +# app/__init__.py +``` + +**Step 2: Create `app/config.py`** + +Extract all env-var reading and configuration constants from `app.py` into a config module. This includes: +- `OPENCLAW_GATEWAY_URL`, `OPENCLAW_TOKEN`, `TASKBOARD_API_KEY`, `TASKBOARD_BASE_URL` +- `ALLOWED_IPS`, `ALLOWED_PATHS` +- Agent/branding config: `MAIN_AGENT_NAME`, `MAIN_AGENT_EMOJI`, `HUMAN_NAME`, etc. +- `PROJECT_NAME`, `COMPANY_NAME`, `COMPANY_CONTEXT`, `COMPLIANCE_FRAMEWORKS` +- Valid statuses list: `VALID_STATUSES = ["Backlog", "Todo", "In Progress", "Review", "Done", "Blocked"]` +- Valid priorities list: `VALID_PRIORITIES = ["Critical", "High", "Medium", "Low"]` + +Reference: Read top ~100 lines of `app.py` for all `os.getenv()` calls and constants. + +--- + +### Task 2: Create database module + +**Files:** +- Create: `app/database.py` + +**Step 1: Extract database code from `app.py`** + +Move these from `app.py` into `app/database.py`: +- `get_db()` function (SQLite connection factory with WAL mode) +- `init_db()` function (CREATE TABLE statements) +- Any migration/upgrade logic + +The `init_db()` function should be importable and called during app startup. 
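+
+The route modules in this plan consume the database layer through two context managers, `get_db()` for reads and `get_db_write()` for writes. Since the module itself isn't reproduced here, the following is only a minimal sketch of that shape; the file path, the module-level lock, and the explicit `BEGIN IMMEDIATE` are assumptions, not confirmed implementation details:
+
+```python
+# app/database.py: minimal sketch (path, lock, and BEGIN IMMEDIATE are assumptions)
+import sqlite3
+import threading
+from contextlib import contextmanager
+
+DB_PATH = "data/taskboard.db"   # assumed location under the data volume
+_write_lock = threading.Lock()  # serialize writers; WAL lets readers proceed
+
+def _connect() -> sqlite3.Connection:
+    conn = sqlite3.connect(DB_PATH, timeout=30)
+    conn.row_factory = sqlite3.Row           # rows convert cleanly via dict(row)
+    conn.execute("PRAGMA journal_mode=WAL")  # readers don't block the single writer
+    return conn
+
+@contextmanager
+def get_db():
+    """Read-only connection: no commit, always closed."""
+    conn = _connect()
+    try:
+        yield conn
+    finally:
+        conn.close()
+
+@contextmanager
+def get_db_write():
+    """Single-writer connection: commit on success, rollback on error."""
+    with _write_lock:
+        conn = _connect()
+        conn.isolation_level = None          # manage the transaction explicitly
+        try:
+            conn.execute("BEGIN IMMEDIATE")  # take the write lock up front
+            yield conn
+            conn.commit()
+        except Exception:
+            conn.rollback()
+            raise
+        finally:
+            conn.close()
+```
+
+`init_db()` and `log_activity()` (used by the task routes) would live in the same module; they are omitted from this sketch.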
+ +Reference: Search `app.py` for `sqlite3`, `CREATE TABLE`, `get_db`. + +--- + +### Task 3: Create models module + +**Files:** +- Create: `app/models.py` + +**Step 1: Extract all Pydantic models from `app.py`** + +Move these classes: +- `TaskCreate`, `TaskUpdate` +- `CommentCreate` +- `ActionItemCreate` +- `JarvisMessage` +- Any other Pydantic BaseModel subclasses + +Add status validation to `TaskCreate` and `TaskUpdate`: +```python +from app.config import VALID_STATUSES, VALID_PRIORITIES + +class TaskCreate(BaseModel): + # ... existing fields ... + status: str = "Backlog" + + @field_validator("status") + @classmethod + def validate_status(cls, v): + if v not in VALID_STATUSES: + raise ValueError(f"Invalid status '{v}'. Must be one of: {VALID_STATUSES}") + return v + + @field_validator("priority") + @classmethod + def validate_priority(cls, v): + if v and v not in VALID_PRIORITIES: + raise ValueError(f"Invalid priority '{v}'. Must be one of: {VALID_PRIORITIES}") + return v +``` + +Same validators on `TaskUpdate` (but fields are Optional). + +--- + +### Task 4: Create WebSocket manager module + +**Files:** +- Create: `app/websocket.py` + +**Step 1: Extract WebSocket manager from `app.py`** + +Move the `ConnectionManager` class and the `broadcast()` helper function. This module should be importable by all route modules. + +Reference: Search `app.py` for `class ConnectionManager`, `websocket`, `broadcast`. + +--- + +### Task 5: Create OpenClaw integration module + +**Files:** +- Create: `app/openclaw.py` + +**Step 1: Extract OpenClaw helper functions from `app.py`** + +Move these functions: +- `spawn_agent_session()` +- `send_to_agent_session()` +- `get_agent_system_prompt()` (or similar prompt builder) +- Any `httpx` / `aiohttp` calls to OpenClaw gateway +- Session management helpers + +Reference: Search `app.py` for `openclaw`, `spawn`, `tools/invoke`, `sessions_spawn`. + +--- + +### Task 6: Create route modules + +**Files:** +- Create: `app/routes/__init__.py` +- Create: `app/routes/tasks.py` +- Create: `app/routes/comments.py` +- Create: `app/routes/action_items.py` +- Create: `app/routes/sessions.py` +- Create: `app/routes/chat.py` +- Create: `app/routes/uploads.py` +- Create: `app/routes/projects.py` (empty placeholder for now) + +**Step 1: Create `app/routes/__init__.py`** + +Collects all routers: +```python +from fastapi import APIRouter +from app.routes.tasks import router as tasks_router +from app.routes.comments import router as comments_router +from app.routes.action_items import router as action_items_router +from app.routes.sessions import router as sessions_router +from app.routes.chat import router as chat_router +from app.routes.uploads import router as uploads_router +from app.routes.projects import router as projects_router + +api_router = APIRouter() +api_router.include_router(tasks_router) +api_router.include_router(comments_router) +api_router.include_router(action_items_router) +api_router.include_router(sessions_router) +api_router.include_router(chat_router) +api_router.include_router(uploads_router) +api_router.include_router(projects_router) +``` + +**Step 2: Split endpoints into route files** + +Each route file follows this pattern: +```python +from fastapi import APIRouter, Depends, HTTPException +from app.database import get_db +from app.models import ... +from app.websocket import manager, broadcast +from app.config import ... + +router = APIRouter() + +@router.get("/api/tasks") +async def get_tasks(...): + ... 
+``` + +Split by domain: +- `tasks.py`: `/api/tasks` CRUD, `/api/tasks/{id}/move`, `/api/tasks/{id}/start-work`, `/api/tasks/{id}/stop-work`, `/api/agents/{agent}/tasks`, `/api/config`, `/api/activity` +- `comments.py`: `/api/tasks/{id}/comments` GET/POST/DELETE +- `action_items.py`: `/api/tasks/{id}/action-items` GET/POST, `/api/action-items/{id}/resolve|unresolve|archive|unarchive`, DELETE +- `sessions.py`: `/api/sessions` GET, `/api/sessions/create`, `/api/sessions/{key}/stop`, `/api/sessions/stop-all`, DELETE +- `chat.py`: `/api/jarvis/history`, `/api/jarvis/chat`, `/api/jarvis/respond` +- `uploads.py`: `/api/upload/image` +- `projects.py`: Empty router placeholder (implemented in Phase 2) + +--- + +### Task 7: Create main.py entry point + +**Files:** +- Create: `app/main.py` + +**Step 1: Create the app factory and entry point** + +```python +from fastapi import FastAPI +from fastapi.staticfiles import StaticFiles +from fastapi.responses import FileResponse +from starlette.middleware.cors import CORSMiddleware + +from app.config import ALLOWED_IPS, ... +from app.database import init_db +from app.websocket import manager +from app.routes import api_router + +app = FastAPI(title="OpenDevBoard") + +# Middleware (move from app.py) +# - IPRestrictionMiddleware +# - RequestLoggingMiddleware +# - CORSMiddleware + +# Include all routes +app.include_router(api_router) + +# WebSocket endpoint +@app.websocket("/ws") +async def websocket_endpoint(websocket): + # ... existing WS logic ... + +# Serve static files +@app.get("/") +async def root(): + return FileResponse("static/index.html") + +# Startup event +@app.on_event("startup") +async def startup(): + init_db() +``` + +Move middleware classes (IPRestrictionMiddleware, RequestLoggingMiddleware) either inline in main.py or into a separate `app/middleware.py` if they're large. + +--- + +### Task 8: Verify refactored backend works + +**Step 1: Update imports and fix any circular dependencies** + +Run the app and verify all endpoints work: +```bash +cd /home/matthias/home-stack/openclawdev-taskboard +python -m uvicorn app.main:app --host 0.0.0.0 --port 8080 --reload +``` + +**Step 2: Test key endpoints manually** +- `GET /` → serves index.html +- `GET /api/config` → returns config +- `GET /api/tasks` → returns tasks +- WebSocket connection at `/ws` → connects + +**Step 3: Verify WebSocket live updates still function** +- Open browser, create/move a task, confirm real-time updates + +### **CHECKPOINT 1** — Pause for user review and commit. Backend refactoring complete, app runs identically to before. 
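+
+The manual checks from Task 8 can also be scripted before committing at this checkpoint. A minimal sketch, assuming the refactored app is running locally on port 8080 and that requests from localhost pass the IP restriction middleware:
+
+```python
+# smoke_test.py (assumed helper script, not part of the app package)
+import httpx
+
+BASE = "http://localhost:8080"  # adjust if TASKBOARD_BASE_URL differs
+
+def main() -> None:
+    with httpx.Client(base_url=BASE, timeout=10.0) as client:
+        # Root should serve the static board page
+        assert client.get("/").status_code == 200
+        # Config endpoint should return the board configuration object
+        assert isinstance(client.get("/api/config").json(), dict)
+        # Task list should return a JSON array (possibly empty)
+        tasks = client.get("/api/tasks").json()
+        assert isinstance(tasks, list)
+    print(f"Smoke test passed ({len(tasks)} tasks)")
+
+if __name__ == "__main__":
+    main()
+```
+
+WebSocket delivery still needs the manual browser check from Step 3.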
+ +--- + +## Phase 2: Database Migration & Multi-Project Support + +### Task 9: Add projects table and migrate tasks + +**Files:** +- Modify: `app/database.py` — add projects table creation, ALTER tasks table +- Modify: `app/models.py` — add ProjectCreate, ProjectResponse models + +**Step 1: Update `init_db()` in `app/database.py`** + +Add after existing CREATE TABLE statements: +```python +# Projects table +cursor.execute(""" + CREATE TABLE IF NOT EXISTS projects ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + slug TEXT UNIQUE NOT NULL, + description TEXT DEFAULT '', + color TEXT DEFAULT '#00b4d8', + created_at TEXT NOT NULL + ) +""") + +# Add project_id to tasks if not exists +try: + cursor.execute("ALTER TABLE tasks ADD COLUMN project_id INTEGER DEFAULT 1 REFERENCES projects(id)") +except Exception: + pass # Column already exists + +# Ensure default project exists +cursor.execute("SELECT id FROM projects WHERE slug = 'default'") +if not cursor.fetchone(): + cursor.execute( + "INSERT INTO projects (name, slug, description, color, created_at) VALUES (?, ?, ?, ?, ?)", + ("Default", "default", "Default project", "#00b4d8", datetime.now(timezone.utc).isoformat()) + ) + +conn.commit() +``` + +**Step 2: Add Pydantic models in `app/models.py`** + +```python +class ProjectCreate(BaseModel): + name: str = Field(..., min_length=1, max_length=100) + description: str = Field(default="", max_length=500) + color: str = Field(default="#00b4d8", pattern=r"^#[0-9a-fA-F]{6}$") + +class ProjectResponse(BaseModel): + id: int + name: str + slug: str + description: str + color: str + created_at: str +``` + +--- + +### Task 10: Implement project API endpoints + +**Files:** +- Modify: `app/routes/projects.py` + +**Step 1: Implement CRUD endpoints** + +```python +router = APIRouter() + +@router.get("/api/projects") +async def list_projects(): + # Return all projects ordered by name, Default first + +@router.post("/api/projects") +async def create_project(project: ProjectCreate): + # Generate slug from name (slugify) + # Insert into DB + # Broadcast via WebSocket: { type: "project_created", project: {...} } + +@router.delete("/api/projects/{project_id}") +async def delete_project(project_id: int): + # Prevent deleting Default project (id=1) + # Reassign tasks to Default project OR reject if tasks exist + # Broadcast: { type: "project_deleted", project_id: ... } +``` + +**Step 2: Make task endpoints project-aware** + +Modify `app/routes/tasks.py`: +- `GET /api/tasks` — add optional `project_id` query param for server-side filtering +- `POST /api/tasks` — accept `project_id` in body (default: 1) +- `GET /api/tasks/{id}` — include `project_id` and project info in response +- `GET /api/config` — include projects list in config response + +--- + +### Task 11: Add "Todo" to valid statuses + +**Files:** +- Modify: `app/config.py` — already has "Todo" in VALID_STATUSES from Task 3 + +**Step 1: Verify status validation** + +The `VALID_STATUSES` list already includes "Todo" from Task 3. Verify that: +- Creating a task with status "Todo" works +- Creating a task with an invalid status like "Foo" returns 422 + +### **CHECKPOINT 2** — Pause for user review and commit. Multi-project backend complete. + +--- + +## Phase 3: Frontend — Todo Column, Project Switcher, Filters + +### Task 12: Add "Todo" column to the board + +**Files:** +- Modify: `static/index.html` + +**Step 1: Update the board column definitions** + +Find the JavaScript that defines the column order/statuses. 
Add "Todo" between "Backlog" and "In Progress". The column rendering loop should now produce 6 columns. + +**Step 2: Update CSS for 6 columns** + +Adjust the CSS grid/flex for the board to accommodate 6 columns instead of 5. Likely change: +```css +.board { + grid-template-columns: repeat(6, 1fr); + /* or adjust flex-basis percentages */ +} +``` + +**Step 3: Ensure drag-and-drop works with new column** + +Verify drag-and-drop handlers accept "Todo" as a valid drop target. The status is sent to `PATCH /api/tasks/{id}` which is already validated server-side. + +--- + +### Task 13: Add Project Switcher to header + +**Files:** +- Modify: `static/index.html` + +**Step 1: Add project dropdown to header HTML** + +Add a ` + + + + +``` + +**Step 2: Add JS functions** + +```javascript +let currentProjectId = 'all'; +let projects = []; + +async function loadProjects() { + const resp = await fetch('/api/projects'); + projects = await resp.json(); + populateProjectSwitcher(); +} + +function populateProjectSwitcher() { + const select = document.getElementById('projectFilter'); + select.innerHTML = ''; + projects.forEach(p => { + select.innerHTML += ``; + }); +} + +function filterByProject(projectId) { + currentProjectId = projectId; + renderBoard(); +} +``` + +**Step 3: Update `renderBoard()` to filter by project** + +In the existing `renderBoard()` function, add project filtering: +```javascript +let filteredTasks = tasks; +if (currentProjectId !== 'all') { + filteredTasks = filteredTasks.filter(t => t.project_id == currentProjectId); +} +``` + +**Step 4: Add project badge to cards** + +In `renderCard(task)`, add a badge when viewing "All Projects": +```javascript +if (currentProjectId === 'all') { + const project = projects.find(p => p.id === task.project_id); + if (project) { + card.innerHTML += `${escapeHtml(project.name)}`; + } +} +``` + +**Step 5: Add CSS for project badge** +```css +.project-badge { + display: inline-block; + padding: 2px 8px; + border-radius: 12px; + font-size: 0.7rem; + color: white; + opacity: 0.85; +} +``` + +--- + +### Task 14: Add Project Manager modal + +**Files:** +- Modify: `static/index.html` + +**Step 1: Add "Manage Projects" link near the project switcher** + +Small link/button that opens a modal. + +**Step 2: Create project manager modal** + +Simple modal with: +- List of existing projects (name, color, delete button — disabled for Default) +- "Add Project" form: name input, color picker, description textarea, submit button +- Uses `POST /api/projects` and `DELETE /api/projects/{id}` +- Refreshes project list on changes + +--- + +### Task 15: Add filter bar + +**Files:** +- Modify: `static/index.html` + +**Step 1: Add filter bar HTML below header** + +```html +
+ + + +
+``` + +**Step 2: Implement `applyFilters()` function** + +```javascript +function applyFilters() { + renderBoard(); // renderBoard reads filter values +} +``` + +**Step 3: Update `renderBoard()` to apply all filters** + +```javascript +function getFilteredTasks() { + let filtered = [...tasks]; + + // Project filter + if (currentProjectId !== 'all') { + filtered = filtered.filter(t => t.project_id == currentProjectId); + } + + // Priority filter + const priority = document.getElementById('filterPriority').value; + if (priority) filtered = filtered.filter(t => t.priority === priority); + + // Agent filter + const agent = document.getElementById('filterAgent').value; + if (agent) filtered = filtered.filter(t => t.agent === agent); + + // Search filter + const search = document.getElementById('filterSearch').value.toLowerCase(); + if (search) { + filtered = filtered.filter(t => + t.title.toLowerCase().includes(search) || + (t.description && t.description.toLowerCase().includes(search)) + ); + } + + return filtered; +} +``` + +**Step 4: Add CSS for filter bar** + +```css +.filter-bar { + display: flex; + gap: 0.5rem; + padding: 0.5rem 1rem; + background: var(--bg-dark); + border-bottom: 1px solid rgba(255,255,255,0.05); + flex-wrap: wrap; +} +.filter-bar select, .filter-bar input { + background: var(--bg-card); + color: var(--text); + border: 1px solid rgba(255,255,255,0.1); + padding: 0.4rem 0.6rem; + border-radius: 6px; + font-size: 0.85rem; +} +``` + +### **CHECKPOINT 3** — Pause for user review and commit. Todo column, project switcher, and filter bar functional. + +--- + +## Phase 4: Responsive Design + +### Task 16: Add responsive CSS media queries + +**Files:** +- Modify: `static/index.html` + +**Step 1: Add tablet breakpoint (768px)** + +```css +@media (max-width: 768px) { + .board { + grid-template-columns: repeat(3, 1fr); /* 3 columns per row */ + } + .filter-bar { + flex-direction: column; + } + .task-modal { + width: 95vw; + max-width: none; + } + .command-bar-expanded { + width: 95vw; + } +} +``` + +**Step 2: Add mobile breakpoint (480px)** + +```css +@media (max-width: 480px) { + .board { + grid-template-columns: 1fr; /* Single column stacking */ + gap: 0.5rem; + } + .header { + flex-wrap: wrap; + } + .project-switcher { + width: 100%; + } + .task-modal { + width: 100vw; + height: 100vh; + border-radius: 0; + } + .task-modal .split-pane { + flex-direction: column; /* Stack details + chat vertically */ + } + .command-bar-expanded { + width: 100vw; + height: 100vh; + } +} +``` + +**Step 3: Test at various viewport sizes** + +Use browser dev tools to verify at 1920px, 768px, and 480px widths. + +### **CHECKPOINT 4** — Pause for user review and commit. Responsive design complete. + +--- + +## Phase 5: Markdown Export + +### Task 17: Implement markdown export in task modal + +**Files:** +- Modify: `static/index.html` + +**Step 1: Add "Export MD" button to task modal header** + +Add button next to existing header buttons (delete, save, close). 
+ +**Step 2: Implement `exportTaskMarkdown(taskId)` function** + +```javascript +async function exportTaskMarkdown(taskId) { + const task = tasks.find(t => t.id === taskId); + if (!task) return; + + const taskComments = comments[taskId] || []; + const taskActionItems = actionItems[taskId] || []; + const project = projects.find(p => p.id === task.project_id); + + let md = `# ${task.title}\n\n`; + md += `| Field | Value |\n|-------|-------|\n`; + md += `| Status | ${task.status} |\n`; + md += `| Priority | ${task.priority} |\n`; + md += `| Agent | ${task.agent} |\n`; + md += `| Project | ${project ? project.name : 'Default'} |\n`; + md += `| Due Date | ${task.due_date || 'None'} |\n`; + md += `| Created | ${task.created_at} |\n`; + md += `| Updated | ${task.updated_at} |\n\n`; + + if (task.description) { + md += `## Description\n\n${task.description}\n\n`; + } + + if (taskActionItems.length > 0) { + md += `## Action Items\n\n`; + taskActionItems.forEach(item => { + const check = item.resolved ? 'x' : ' '; + md += `- [${check}] **${item.item_type}** (${item.agent}): ${item.content}\n`; + }); + md += '\n'; + } + + if (taskComments.length > 0) { + md += `## Comments\n\n`; + taskComments.forEach(c => { + md += `### ${c.agent} — ${formatDateTime(c.created_at)}\n\n`; + md += `${c.content}\n\n---\n\n`; + }); + } + + // Trigger download + const blob = new Blob([md], { type: 'text/markdown' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + const slug = task.title.toLowerCase().replace(/[^a-z0-9]+/g, '-').slice(0, 50); + a.href = url; + a.download = `task-${task.id}-${slug}.md`; + a.click(); + URL.revokeObjectURL(url); +} +``` + +### **CHECKPOINT 5** — Pause for user review and commit. Markdown export complete. + +--- + +## Phase 6: Agent Hardening + +### Task 18: Guard against double-spawn + +**Files:** +- Modify: `app/openclaw.py` + +**Step 1: Add session liveness check** + +```python +async def is_session_alive(session_key: str) -> bool: + """Check if an OpenClaw session is still active.""" + try: + resp = await httpx.AsyncClient().post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json={"tool": "sessions_list", "args": {"limit": 50}}, + headers={"Authorization": f"Bearer {OPENCLAW_TOKEN}"}, + timeout=5.0 + ) + sessions = resp.json().get("result", {}).get("sessions", []) + return any(s.get("key") == session_key for s in sessions) + except Exception: + return False +``` + +**Step 2: Update `spawn_agent_session()` to check first** + +Before spawning, check: +```python +async def spawn_agent_session(task_id, ...): + db = get_db() + task = db.execute("SELECT agent_session_key FROM tasks WHERE id = ?", (task_id,)).fetchone() + + if task and task["agent_session_key"]: + if await is_session_alive(task["agent_session_key"]): + logger.info(f"Task {task_id} already has active session, skipping spawn") + return # Don't double-spawn + else: + # Clear dead session + db.execute("UPDATE tasks SET agent_session_key = NULL WHERE id = ?", (task_id,)) + db.commit() + + # Proceed with spawn... 
+``` + +--- + +### Task 19: Session liveness check on card open + +**Files:** +- Modify: `app/routes/tasks.py` — add new endpoint +- Modify: `static/index.html` — call on modal open + +**Step 1: Add backend endpoint** + +```python +@router.get("/api/tasks/{task_id}/agent-status") +async def get_agent_status(task_id: int): + db = get_db() + task = db.execute("SELECT agent_session_key, working_agent FROM tasks WHERE id = ?", (task_id,)).fetchone() + if not task: + raise HTTPException(404) + + session_key = task["agent_session_key"] + alive = False + if session_key: + alive = await is_session_alive(session_key) + if not alive: + # Clear stale data + db.execute("UPDATE tasks SET agent_session_key = NULL, working_agent = NULL WHERE id = ?", (task_id,)) + db.commit() + + return {"alive": alive, "session_key": session_key, "working_agent": task["working_agent"]} +``` + +**Step 2: Call from frontend on modal open** + +In the function that opens the task modal, add: +```javascript +// Check agent liveness +fetch(`/api/tasks/${taskId}/agent-status`) + .then(r => r.json()) + .then(status => { + if (!status.alive && workingAgentsByTask[taskId]) { + // Clear working indicator + delete workingAgentsByTask[taskId]; + renderBoard(); + } + updateAgentControls(taskId, status); + }); +``` + +--- + +### Task 20: Auto-stop agent when task moves to Done + +**Files:** +- Modify: `app/routes/tasks.py` — update move/update logic + +**Step 1: In the task move/update handler, add auto-stop logic** + +When status changes to "Done": +```python +if new_status == "Done": + session_key = task["agent_session_key"] + if session_key: + try: + # Stop the agent session + async with httpx.AsyncClient() as client: + await client.post( + f"{OPENCLAW_GATEWAY_URL}/tools/invoke", + json={"tool": "sessions_stop", "args": {"sessionKey": session_key}}, + headers={"Authorization": f"Bearer {OPENCLAW_TOKEN}"}, + timeout=10.0 + ) + # Clear session data + cursor.execute("UPDATE tasks SET agent_session_key = NULL, working_agent = NULL WHERE id = ?", (task_id,)) + logger.info(f"Auto-stopped agent session {session_key} for task {task_id} moved to Done") + except Exception as e: + logger.error(f"Failed to stop agent session: {e}") +``` + +### **CHECKPOINT 6** — Pause for user review and commit. Agent hardening complete. + +--- + +## Phase 7: Docker & Cleanup + +### Task 21: Update Docker configuration + +**Files:** +- Modify: `Dockerfile` +- Modify: `docker-compose.yml` + +**Step 1: Update Dockerfile** + +```dockerfile +FROM python:3.11-slim +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY app/ app/ +COPY static/ static/ +VOLUME /app/data +EXPOSE 8080 +CMD ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8080", "--log-level", "info", "--no-access-log"] +``` + +**Step 2: Test Docker build and run** + +```bash +docker compose build && docker compose up -d +``` + +Verify app works in Docker container. + +--- + +### Task 22: Final integration verification + +**Step 1: Verify all features end-to-end** +- Create a project, switch to it, create tasks +- Drag tasks through all 6 columns +- Use filters (priority, agent, search) +- Export a task to markdown +- Verify agent spawns on In Progress, stops on Done +- Open card with agent session, verify liveness check +- Test responsive layouts at 768px and 480px +- Verify WebSocket real-time updates across all features + +**Step 2: Keep `app.py` as backup** + +Rename to `app.py.legacy` or keep `app.py.bak` that already exists. 
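+
+**Step 3 (optional): Quick API smoke check**
+
+To complement the manual pass in Step 1, a few `fetch` calls from the browser console can confirm the new endpoints respond. This is only a sketch: it assumes the container from Task 21 is running on the default `http://localhost:8080` and uses endpoints introduced in earlier tasks.
+
+```javascript
+// Paste into the browser console while the app is running.
+const base = 'http://localhost:8080';
+
+const cfg = await fetch(`${base}/api/config`).then(r => r.json());
+console.log('projects:', (cfg.projects || []).length, 'agents:', (cfg.agents || []).length);
+
+const allTasks = await fetch(`${base}/api/tasks`).then(r => r.json());
+console.log('tasks:', allTasks.length);
+
+if (allTasks.length > 0) {
+  // Liveness endpoint from Task 19
+  const status = await fetch(`${base}/api/tasks/${allTasks[0].id}/agent-status`)
+    .then(r => r.json());
+  console.log('agent-status for first task:', status);
+}
+```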
+ +### **CHECKPOINT 7** — Final review. Full refactor and feature expansion complete. diff --git a/static/index.html b/static/index.html index e57da54..b56f037 100644 --- a/static/index.html +++ b/static/index.html @@ -1017,6 +1017,126 @@ border-radius: 6px; font-size: 0.875rem; } + + /* Filter Bar */ + .filter-bar { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem 2rem; + background: var(--bg-card); + border-bottom: 1px solid var(--border); + } + .filter-bar select, .filter-bar input { + background: var(--bg-dark); + color: var(--text); + border: 1px solid var(--border); + padding: 0.4rem 0.5rem; + border-radius: 6px; + font-size: 0.8rem; + } + .filter-bar input::placeholder { color: var(--text-muted); } + .filter-bar select:focus, .filter-bar input:focus { + outline: none; + border-color: var(--accent); + } + + /* Project badge on cards */ + .project-badge { + display: inline-block; + padding: 2px 8px; + border-radius: 12px; + font-size: 0.7rem; + color: white; + opacity: 0.85; + } + + /* Project Manager modal */ + .pm-modal-overlay { + display: none; + position: fixed; + top: 0; left: 0; right: 0; bottom: 0; + background: rgba(0,0,0,0.7); + z-index: 200; + justify-content: center; + align-items: center; + } + .pm-modal-overlay.active { display: flex; } + .pm-modal { + background: var(--bg-card); + border-radius: 12px; + width: 420px; + max-height: 80vh; + display: flex; + flex-direction: column; + box-shadow: 0 25px 50px -12px rgba(0,0,0,0.5); + } + .pm-modal-header { + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + display: flex; + justify-content: space-between; + align-items: center; + } + .pm-modal-header h3 { font-size: 1.1rem; } + .pm-modal-body { + padding: 1rem 1.25rem; + overflow-y: auto; + flex: 1; + } + .pm-project-item { + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.4rem 0; + border-bottom: 1px solid rgba(51,65,85,0.5); + } + .pm-color-dot { + width: 12px; height: 12px; + border-radius: 50%; + flex-shrink: 0; + } + .pm-project-name { flex: 1; font-size: 0.9rem; } + .pm-delete-btn { + background: none; border: none; color: var(--critical); + cursor: pointer; font-size: 1rem; padding: 2px 6px; border-radius: 4px; + } + .pm-delete-btn:hover { background: rgba(239,68,68,0.15); } + .pm-delete-btn:disabled { opacity: 0.3; cursor: not-allowed; } + .pm-add-form { + margin-top: 1rem; + display: flex; + flex-direction: column; + gap: 0.5rem; + } + .pm-add-form input, .pm-add-form textarea { + background: var(--bg-dark); + color: var(--text); + border: 1px solid var(--border); + padding: 0.4rem 0.5rem; + border-radius: 6px; + font-size: 0.85rem; + } + .pm-add-form textarea { resize: vertical; min-height: 40px; } + .pm-add-row { + display: flex; gap: 0.5rem; align-items: center; + } + .pm-add-row input[type="color"] { + width: 32px; height: 32px; border: none; padding: 0; + border-radius: 4px; cursor: pointer; background: none; + } + .pm-add-btn { + background: var(--accent); color: white; border: none; + padding: 0.4rem 1rem; border-radius: 6px; cursor: pointer; + font-size: 0.85rem; + } + .pm-add-btn:hover { background: var(--accent-hover); } + .pm-manage-link { + background: none; border: none; color: var(--text-muted); + cursor: pointer; font-size: 0.75rem; text-decoration: underline; + padding: 0; + } + .pm-manage-link:hover { color: var(--accent); } /* Agent Legend */ .agent-legend { @@ -1058,12 +1178,12 @@ padding: 1.5rem; overflow-x: auto; overflow-y: hidden; - height: calc(100vh - 73px); + height: calc(100vh - 
113px); } .column { - flex: 1 0 300px; - min-width: 300px; + flex: 1 0 250px; + min-width: 250px; background: var(--bg-column); border-radius: 8px; display: flex; @@ -1161,15 +1281,7 @@ .tag.priority-Medium { background: var(--medium); color: black; } .tag.priority-Low { background: var(--low); color: white; } - /* Agent colors */ - .tag.agent-Jarvis { background: #6366f1; color: white; } - .tag.agent-Jarvis { background: #0ea5e9; color: white; } - .tag.agent-Architect { background: #8b5cf6; color: white; } - .tag.agent-Security-Auditor { background: #ef4444; color: white; } - .tag.agent-Code-Reviewer { background: #14b8a6; color: white; } - .tag.agent-UX-Manager { background: #ec4899; color: white; } - .tag.agent-User { background: #22c55e; color: white; } - .tag.agent-Unassigned { background: #64748b; color: white; } + /* Agent colors are injected dynamically from config.agentMeta */ /* Notification bubble */ .notification-bubble { @@ -1256,8 +1368,8 @@ text-decoration: line-through; } .action-item.resolved .action-item-check { - background: var(--text-muted); - border-color: var(--text-muted); + background: #10b981; + border-color: #10b981; cursor: pointer; } .action-item.resolved .action-item-check:hover { @@ -1535,8 +1647,8 @@ background: var(--bg-card); border-radius: 16px; width: 100%; - max-width: 600px; - min-height: 500px; + max-width: 720px; + min-height: 700px; max-height: 90vh; overflow: hidden; display: flex; @@ -1661,12 +1773,18 @@ } .header-save-btn.saved { - animation: saveGlow 0.6s ease-out; + animation: saveGlow 1.2s ease-out; + background: rgba(34, 197, 94, 0.2); } - + .header-save-btn.saved .save-icon { display: block !important; - stroke: var(--success); + stroke: #fff; + filter: drop-shadow(0 0 3px rgba(34, 197, 94, 0.8)); + } + + .header-save-btn.saved .save-spinner { + display: none !important; } @keyframes saveGlow { @@ -2598,6 +2716,117 @@ .chat-input-wrapper { position: relative; } + + /* ── Responsive: Tablet (max-width: 768px) ── */ + @media (max-width: 768px) { + .board { + flex-wrap: wrap; + overflow-x: hidden; + overflow-y: auto; + height: auto; + min-height: calc(100vh - 113px); + } + .column { + flex: 1 1 calc(50% - 0.5rem); + min-width: calc(50% - 0.5rem); + max-height: none; + } + .filter-bar { + flex-wrap: wrap; + } + .modal { + width: 95vw; + max-width: none; + } + .command-bar-expanded { + width: 95vw; + max-width: 95vw; + } + } + + /* ── Responsive: Mobile (max-width: 480px) ── */ + @media (max-width: 480px) { + .header { + flex-wrap: wrap; + padding: 0.75rem 1rem; + gap: 0.5rem; + } + .header-actions { + width: 100%; + justify-content: space-between; + } + .command-bar { + max-width: none; + margin: 0; + width: 100%; + } + #projectSwitcher { + width: 100%; + } + .board { + flex-wrap: nowrap; + flex-direction: column; + overflow-y: auto; + overflow-x: hidden; + height: auto; + min-height: calc(100vh - 150px); + padding: 1rem; + } + .column { + flex: 0 0 auto; + min-width: 100%; + width: 100%; + max-height: none; + } + .filter-bar { + flex-direction: column; + align-items: stretch; + padding: 0.5rem 1rem; + } + .modal { + width: 100vw; + height: 100vh; + max-width: none; + max-height: none; + min-height: 100vh; + border-radius: 0; + } + .modal.theater-mode { + width: 100vw; + height: 100vh; + max-width: none; + max-height: none; + margin: 0; + border-radius: 0; + } + .modal-header { + border-radius: 0; + } + .modal-overlay { + padding: 0; + } + .theater-mode .modal-body { + flex-direction: column; + } + .command-bar-expanded { + width: 100vw; + 
max-width: 100vw; + height: 100vh; + max-height: 100vh; + top: 0; + left: 0; + transform: none; + border-radius: 0; + } + .command-bar-expanded.fullsize { + top: 0; + left: 0; + transform: none; + width: 100vw; + height: 100vh; + border-radius: 0; + } + } @@ -2662,15 +2891,52 @@

- + - + + +
+ + + + +
+
+ + +
+
+
+

Manage Projects

+ +
+
+
+
+
+ + +
+ + +
+
+
+
@@ -2722,6 +2988,7 @@

-
+
+
+ + +
- +
- +
+ +
+
- +
- +
+
+
+
@@ -2857,19 +3144,13 @@

Delete Task let columnSortMode = {}; // status -> sort mode (latest|priority|agent|custom) let customSortOrder = {}; // status -> array of task IDs in custom order let pastedImages = []; // Array for multiple images + let currentProjectId = 'all'; // Project filter: 'all' or numeric id let theaterMode = false; let detailsVisible = true; - // Agent icons mapping (main agent added dynamically from config) - window.AGENT_ICONS = { - 'Architect': '🏛️', - 'Security Auditor': '🔒', - 'Code Reviewer': '📋', - 'UX Manager': '🎨', - 'User': '👤', - 'Unassigned': '○' - }; - + // Agent icons mapping (populated from config.agentMeta at startup) + window.AGENT_ICONS = {}; + function getAgentIcon(agent) { return window.AGENT_ICONS[agent] || '○'; } @@ -2957,16 +3238,34 @@

Delete Task sessionSelector.options[0].textContent = `${mainAgent} (Main)`; } - // Update AGENT_ICONS with main agent (used by cards and legend) - if (window.AGENT_ICONS) { + // Populate AGENT_ICONS from agentMeta (dynamic from OpenClaw) + if (config.agentMeta) { + window.AGENT_ICONS = {}; + window.AGENT_ICONS_MAP = {}; + for (const [name, meta] of Object.entries(config.agentMeta)) { + AGENT_ICONS[name] = meta.icon || '○'; + if (name !== 'User' && name !== 'Unassigned') { + AGENT_ICONS_MAP[name] = meta.icon || '○'; + } + } + // Inject dynamic CSS for agent colors + let agentStyles = ''; + for (const [name, meta] of Object.entries(config.agentMeta)) { + const cssName = name.replace(/\s+/g, '-'); + agentStyles += `.tag.agent-${cssName} { background: ${meta.color || '#64748b'}; color: white; }\n`; + } + let styleEl = document.getElementById('dynamic-agent-styles'); + if (!styleEl) { + styleEl = document.createElement('style'); + styleEl.id = 'dynamic-agent-styles'; + document.head.appendChild(styleEl); + } + styleEl.textContent = agentStyles; + } else { + // Fallback: set main agent icon AGENT_ICONS[mainAgent] = mainEmoji; } - // Update AGENT_ICONS_MAP with main agent (used by mention dropdown) - if (window.AGENT_ICONS_MAP) { - AGENT_ICONS_MAP[mainAgent] = mainEmoji; - } - // Store branding globally for use elsewhere window.BRANDING = b; } @@ -2976,34 +3275,65 @@

Delete Task config.agents.forEach(a => { filterAgent.innerHTML += ``; }); - + // Populate legend (skip Unassigned) const legend = document.getElementById('agentLegend'); config.agents.filter(a => a !== 'Unassigned').forEach(a => { legend.innerHTML += `
${getAgentIcon(a)}${a}
`; }); - + const statusSelect = document.getElementById('taskStatus'); const prioritySelect = document.getElementById('taskPriority'); const agentSelect = document.getElementById('taskAgent'); - + config.statuses.forEach(s => { statusSelect.innerHTML += ``; }); - + config.priorities.forEach(p => { prioritySelect.innerHTML += ``; }); - + config.agents.forEach(a => { agentSelect.innerHTML += ``; }); + + // Populate project select in task form + const taskProjectSelect = document.getElementById('taskProject'); + if (taskProjectSelect && config.projects) { + taskProjectSelect.innerHTML = ''; + config.projects.forEach(p => { + taskProjectSelect.innerHTML += ``; + }); + } + + // Populate project switcher + populateProjectSwitcher(); + } + + function populateProjectSwitcher() { + const switcher = document.getElementById('projectSwitcher'); + const current = switcher.value; + switcher.innerHTML = ''; + (config.projects || []).forEach(p => { + switcher.innerHTML += ``; + }); + switcher.value = current; + // Restore if value was removed + if (switcher.value !== current) switcher.value = 'all'; + } + + function onProjectSwitch(val) { + currentProjectId = val; + renderBoard(); + } + + function getProjectById(id) { + return (config.projects || []).find(p => p.id === parseInt(id)); } async function loadTasks() { - const agent = document.getElementById('filterAgent').value; - const url = agent ? `${API}/api/tasks?agent=${agent}` : `${API}/api/tasks`; - tasks = await fetch(url).then(r => r.json()); + tasks = await fetch(`${API}/api/tasks`).then(r => r.json()); // Load comments and action items for (const task of tasks) { @@ -3014,12 +3344,34 @@

Delete Task } } + function applyFilters(taskList) { + let filtered = taskList; + // Project filter + if (currentProjectId !== 'all') { + filtered = filtered.filter(t => String(t.project_id) === String(currentProjectId)); + } + // Priority filter + const pf = document.getElementById('filterPriority')?.value; + if (pf) filtered = filtered.filter(t => t.priority === pf); + // Agent filter + const af = document.getElementById('filterAgent')?.value; + if (af) filtered = filtered.filter(t => t.agent === af); + // Search filter + const sf = (document.getElementById('filterSearch')?.value || '').toLowerCase(); + if (sf) filtered = filtered.filter(t => + (t.title || '').toLowerCase().includes(sf) || + (t.description || '').toLowerCase().includes(sf) + ); + return filtered; + } + function renderBoard() { const board = document.getElementById('board'); board.innerHTML = ''; - + const filteredTasks = applyFilters(tasks); + config.statuses.forEach(status => { - let statusTasks = tasks.filter(t => t.status === status); + let statusTasks = filteredTasks.filter(t => t.status === status); const sortMode = columnSortMode[status] || 'latest'; // Apply sorting @@ -3120,9 +3472,18 @@

Delete Task ).join('')}

` : ''; + // Project badge (only when viewing all projects) + let projectBadgeHtml = ''; + if (currentProjectId === 'all' && task.project_id) { + const proj = getProjectById(task.project_id); + if (proj) { + projectBadgeHtml = `${escapeHtml(proj.name)}`; + } + } + return ` -
Delete Task ${aiWorkingHtml}
${escapeHtml(task.title)}
+ ${projectBadgeHtml} ${task.priority} ${task.agent} ${task.due_date ? `${task.due_date}` : ''} @@ -3274,7 +3636,8 @@

Delete Task document.getElementById('taskPriority').value = 'Medium'; document.getElementById('taskAgent').value = 'Unassigned'; document.getElementById('taskDueDate').value = ''; - + document.getElementById('taskProject').value = currentProjectId !== 'all' ? currentProjectId : '1'; + // Hide elements not relevant for new tasks document.getElementById('headerDeleteBtn').style.display = 'none'; document.getElementById('theaterBtn').style.display = 'none'; @@ -3309,6 +3672,7 @@

Delete Task document.getElementById('taskPriority').value = task.priority; document.getElementById('taskAgent').value = task.agent; document.getElementById('taskDueDate').value = task.due_date || ''; + document.getElementById('taskProject').value = task.project_id || '1'; document.getElementById('headerDeleteBtn').style.display = ''; document.getElementById('theaterBtn').style.display = ''; document.getElementById('chatSection').style.display = 'block'; @@ -3328,11 +3692,8 @@

Delete Task sourceInfo.style.display = 'none'; } - // Update stop button tooltip with agent name if known - const stopAgentBtn = document.getElementById('stopAgentBtn'); - stopAgentBtn.title = task.working_agent ? `Stop ${task.working_agent}` : 'Stop agent'; - - // Show working indicator if agent is currently working + // Update toggle button state based on working agent + updateAgentToggleButton(!!task.working_agent, task.working_agent); if (task.working_agent) { showChatWorkingIndicator(task.working_agent); } else { @@ -3341,7 +3702,31 @@

Delete Task await loadAndRenderComments(taskId); await loadAndRenderActionItems(taskId); - + + // Check agent session liveness + try { + const agentStatus = await fetch(`${API}/api/tasks/${taskId}/agent-status`).then(r => r.json()); + // Show Stop button if session key exists (session may be idle but still reachable) + const hasSession = !!agentStatus.session_key; + if (hasSession || agentStatus.alive) { + updateAgentToggleButton(true, agentStatus.working_agent || task.agent); + if (agentStatus.working_agent) { + showChatWorkingIndicator(agentStatus.working_agent); + } + } else { + updateAgentToggleButton(false, null); + hideChatWorkingIndicator(); + // Update local task data if no session at all + const localTask = tasks.find(t => t.id === taskId); + if (localTask) { + localTask.working_agent = null; + localTask.agent_session_key = null; + } + } + } catch (e) { + console.warn('Failed to check agent status:', e); + } + // Re-attach paste handler to ensure it works in modal context const chatInput = document.getElementById('chatInput'); if (chatInput) { @@ -3426,10 +3811,16 @@

Delete Task // Determine if section should show (has active or archived items) const hasItems = allActiveItems.length > 0 || archivedItems.length > 0; + section.style.display = 'block'; if (!hasItems) { - section.style.display = 'none'; + list.innerHTML = ''; + countEl.textContent = ''; + // Collapse when no items + section.classList.add('collapsed'); + document.getElementById('actionItemsContent').classList.add('collapsed'); + document.getElementById('actionItemsCollapseBtn').textContent = '▶'; + actionItemsCollapsed = true; } else { - section.style.display = 'block'; const openCount = openItems.length; countEl.textContent = openCount > 0 ? `${openCount} open` : (allActiveItems.length > 0 ? 'all done' : ''); @@ -3567,6 +3958,35 @@

Delete Task await loadTasks(); renderBoard(); } + + function toggleAddActionItem() { + // Expand section if collapsed + if (actionItemsCollapsed) { + toggleActionItemsCollapse(); + } + const form = document.getElementById('addActionItemForm'); + form.style.display = form.style.display === 'none' ? 'flex' : 'none'; + if (form.style.display === 'flex') { + document.getElementById('newActionItemContent').focus(); + } + } + + async function submitNewActionItem() { + const content = document.getElementById('newActionItemContent').value.trim(); + if (!content) return; + const type = document.getElementById('newActionItemType').value; + const taskId = parseInt(document.getElementById('taskId').value); + await fetch(`${API}/api/tasks/${taskId}/action-items`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ agent: 'user', content: content, item_type: type }) + }); + document.getElementById('newActionItemContent').value = ''; + document.getElementById('addActionItemForm').style.display = 'none'; + await loadAndRenderActionItems(taskId); + await loadTasks(); + renderBoard(); + } function formatMessageContent(content) { // Check if content is an image (base64 or URL) @@ -3746,6 +4166,7 @@

Delete Task priority: document.getElementById('taskPriority').value, agent: document.getElementById('taskAgent').value, due_date: document.getElementById('taskDueDate').value || null, + project_id: parseInt(document.getElementById('taskProject').value) || 1, }; // Show spinner @@ -3778,7 +4199,7 @@

Delete Task // Reset after animation setTimeout(() => { saveBtn.classList.remove('saved'); - }, 1500); + }, 2500); } catch (error) { saveBtn.classList.remove('saving'); console.error('Save failed:', error); @@ -3786,6 +4207,56 @@

Delete Task } // Delete confirmation modal + async function exportTaskMarkdown() { + const taskId = document.getElementById('taskId').value; + if (!taskId) return; + const task = tasks.find(t => t.id == taskId); + if (!task) return; + + const taskComments = comments[taskId] || []; + const taskActionItems = actionItems[taskId] || []; + const project = getProjectById(task.project_id); + + let md = `# ${task.title}\n\n`; + md += `| Field | Value |\n|-------|-------|\n`; + md += `| Status | ${task.status} |\n`; + md += `| Priority | ${task.priority} |\n`; + md += `| Agent | ${task.agent} |\n`; + md += `| Project | ${project ? project.name : 'Default'} |\n`; + md += `| Due Date | ${task.due_date || 'None'} |\n`; + md += `| Created | ${task.created_at} |\n`; + md += `| Updated | ${task.updated_at} |\n\n`; + + if (task.description) { + md += `## Description\n\n${task.description}\n\n`; + } + + if (taskActionItems.length > 0) { + md += `## Action Items\n\n`; + taskActionItems.forEach(item => { + const check = item.resolved ? 'x' : ' '; + md += `- [${check}] **${item.item_type}** (${item.agent}): ${item.content}\n`; + }); + md += '\n'; + } + + if (taskComments.length > 0) { + md += `## Comments\n\n`; + taskComments.forEach(c => { + md += `### ${c.agent} — ${formatDateTime(c.created_at)}\n\n`; + md += `${c.content}\n\n---\n\n`; + }); + } + + const slug = task.title.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/-+$/, '').slice(0, 50); + const a = document.createElement('a'); + a.href = 'data:text/markdown;charset=utf-8,' + encodeURIComponent(md); + a.download = `task-${task.id}-${slug}.md`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + } + function confirmDeleteTask() { const taskId = document.getElementById('taskId').value; if (!taskId) return; // Can't delete unsaved task @@ -3821,6 +4292,42 @@

Delete Task function saveTaskFromHeader() { document.getElementById('taskForm').requestSubmit(); } + + let _autoSaving = false; + async function autoSaveTaskField() { + const taskId = document.getElementById('taskId').value; + if (!taskId || _autoSaving) return; + _autoSaving = true; + const saveBtn = document.getElementById('headerSaveBtn'); + saveBtn.classList.add('saving'); + saveBtn.classList.remove('saved'); + try { + const data = { + title: document.getElementById('taskTitleInput').value, + description: document.getElementById('taskDescription').value, + status: document.getElementById('taskStatus').value, + priority: document.getElementById('taskPriority').value, + agent: document.getElementById('taskAgent').value, + due_date: document.getElementById('taskDueDate').value || null, + project_id: parseInt(document.getElementById('taskProject').value) || 1, + }; + await fetch(`${API}/api/tasks/${taskId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(data) + }); + await loadTasks(); + renderBoard(); + saveBtn.classList.remove('saving'); + saveBtn.classList.add('saved'); + setTimeout(() => saveBtn.classList.remove('saved'), 2500); + } catch (e) { + saveBtn.classList.remove('saving'); + console.error('Auto-save failed:', e); + } finally { + _autoSaving = false; + } + } async function deleteTask() { // Legacy function - now redirects to confirmation modal @@ -4224,13 +4731,12 @@

Delete Task }); // Trigger download - const blob = new Blob([md], { type: 'text/markdown' }); - const url = URL.createObjectURL(blob); const a = document.createElement('a'); - a.href = url; + a.href = 'data:text/markdown;charset=utf-8,' + encodeURIComponent(md); a.download = `chat-export-${taskId}-${Date.now()}.md`; + document.body.appendChild(a); a.click(); - URL.revokeObjectURL(url); + document.body.removeChild(a); toggleExportMode(); } @@ -4348,12 +4854,6 @@

Delete Task document.getElementById('fileInput').value = ''; } - // Filter change - document.getElementById('filterAgent').addEventListener('change', async () => { - await loadTasks(); - renderBoard(); - }); - // WebSocket for live updates let ws = null; let wsConnected = false; @@ -4428,7 +4928,11 @@

Delete Task await loadTasks(); renderBoard(); } - + + if (data.type === 'project_created' || data.type === 'project_deleted') { + await refreshProjects(); + } + if (data.type === 'comment_added') { const openTaskId = document.getElementById('taskId').value; if (openTaskId && parseInt(openTaskId) === data.task_id) { @@ -4476,24 +4980,41 @@

Delete Task const openTaskId = document.getElementById('taskId').value; if (openTaskId && parseInt(openTaskId) === data.task_id && data.agent) { showChatWorkingIndicator(data.agent); + updateAgentToggleButton(true, data.agent); } } if (data.type === 'work_stopped') { - // Remove agent from tracking (supports multiple) - if (data.task_id && data.agent && workingAgentsByTask[data.task_id]) { - workingAgentsByTask[data.task_id].delete(data.agent); - if (workingAgentsByTask[data.task_id].size === 0) { + // Remove agent from tracking + if (data.task_id) { + if (data.agent && workingAgentsByTask[data.task_id]) { + // Specific agent stopped - remove only that one + workingAgentsByTask[data.task_id].delete(data.agent); + if (workingAgentsByTask[data.task_id].size === 0) { + delete workingAgentsByTask[data.task_id]; + } + } else { + // No agent specified or unknown - clear all working agents for this task delete workingAgentsByTask[data.task_id]; } } // Refresh board to update AI working indicator on card await loadTasks(); renderBoard(); - // Also remove specific agent from modal indicator if this task is open + // Also remove agent from modal indicator if this task is open const openTaskId = document.getElementById('taskId').value; - if (openTaskId && parseInt(openTaskId) === data.task_id && data.agent) { - removeChatWorkingAgent(data.agent); + if (openTaskId && parseInt(openTaskId) === data.task_id) { + if (data.agent) { + removeChatWorkingAgent(data.agent); + } else { + // Clear all chat working indicators if no specific agent + const chatWorkingAgents = document.getElementById('chatWorkingAgents'); + if (chatWorkingAgents) chatWorkingAgents.innerHTML = ''; + document.getElementById('chatWorkingIndicator').style.display = 'none'; + } + // Update toggle button state + const stillWorking = workingAgentsByTask[data.task_id] && workingAgentsByTask[data.task_id].size > 0; + updateAgentToggleButton(stillWorking, stillWorking ? Array.from(workingAgentsByTask[data.task_id])[0] : null); } } @@ -4857,46 +5378,69 @@

Delete Task } } - // Stop agent working on a task - async function stopTaskAgent() { + // Update the agent toggle button appearance + function updateAgentToggleButton(isWorking, agentName) { + const btn = document.getElementById('stopAgentBtn'); + if (!btn) return; + if (isWorking) { + btn.innerHTML = '■'; + btn.title = `Stop ${agentName || 'agent'}`; + btn.style.background = 'rgba(239, 68, 68, 0.15)'; + btn.style.borderColor = 'var(--critical)'; + btn.style.color = 'var(--critical)'; + } else { + btn.innerHTML = '▶'; + btn.title = 'Start agent'; + btn.style.background = 'rgba(34, 197, 94, 0.15)'; + btn.style.borderColor = 'var(--low)'; + btn.style.color = 'var(--low)'; + } + btn.disabled = false; + } + + // Toggle agent working on a task (start/stop) + async function toggleTaskAgent() { const taskId = document.getElementById('taskId').value; if (!taskId) return; - + const btn = document.getElementById('stopAgentBtn'); btn.disabled = true; - + + const task = tasks.find(t => t.id == taskId); + if (!task) { btn.disabled = false; return; } + + // Determine if agent is currently working (or has a live session) + const isWorking = !!(task.working_agent || task.agent_session_key || (workingAgentsByTask[taskId] && workingAgentsByTask[taskId].size > 0)); + try { - // Get the task's session key - const task = tasks.find(t => t.id == taskId); - if (task && task.agent_session_key) { - await fetch(`/api/sessions/${encodeURIComponent(task.agent_session_key)}/stop`, { + if (isWorking) { + // === STOP === + if (task.agent_session_key) { + await fetch(`/api/sessions/${encodeURIComponent(task.agent_session_key)}/stop`, { + method: 'POST' + }); + } + await fetch(`/api/tasks/${taskId}/stop-work`, { method: 'POST' }); + hideChatWorkingIndicator(); + } else { + // === START === + const agentName = task.agent || 'Agent'; + await fetch(`/api/tasks/${taskId}/start-work?agent=${encodeURIComponent(agentName)}`, { method: 'POST' }); } - - // Also stop the work indicator - await fetch(`/api/tasks/${taskId}/stop-work`, { method: 'POST' }); - - // Hide chat working indicator - hideChatWorkingIndicator(); - - // Visual feedback - btn.style.background = 'var(--low)'; - btn.style.borderColor = 'var(--low)'; - btn.style.color = 'white'; - - setTimeout(() => { - btn.disabled = false; - btn.style.background = ''; - btn.style.borderColor = ''; - btn.style.color = ''; - }, 2000); - + // Refresh board await loadTasks(); renderBoard(); + + // Update button state based on new state + const updatedTask = tasks.find(t => t.id == taskId); + if (updatedTask) { + updateAgentToggleButton(!!updatedTask.working_agent, updatedTask.working_agent); + } } catch (e) { - console.error('Error stopping agent:', e); + console.error('Error toggling agent:', e); btn.disabled = false; } } @@ -5506,12 +6050,11 @@

🔄 Workflow Stages

🤖 Agents

-
${mainEmoji} ${mainAgent} — Main coordinator, handles command bar chat
-
🏛️ Architect — System design, patterns, scalability
-
🔒 Security Auditor — SOC2, HIPAA, CIS compliance
-
📋 Code Reviewer — Code quality, best practices
-
🎨 UX Manager — User flows, UI consistency
-
👤 User — Human supervisor (you!)
+ ${config.agentMeta ? config.agents.map(a => { + const meta = config.agentMeta[a]; + if (!meta) return ''; + return `
${meta.icon} ${a} — ${meta.description}
`; + }).join('') : `
${mainEmoji} ${mainAgent} — Main coordinator
`}
@@ -5619,13 +6162,8 @@

⌨️ Keyboard Shortcuts

document.getElementById('helpModal').classList.remove('active'); } - // Mention/Tag Agents (main agent added dynamically from config) - window.AGENT_ICONS_MAP = { - 'Architect': '🏛️', - 'Security Auditor': '🔒', - 'Code Reviewer': '📋', - 'UX Manager': '🎨' - }; + // AGENT_ICONS_MAP is populated from config.agentMeta in applyBranding() + if (!window.AGENT_ICONS_MAP) window.AGENT_ICONS_MAP = {}; function toggleMentionDropdown(event) { event.stopPropagation(); @@ -5670,6 +6208,52 @@

⌨️ Keyboard Shortcuts

} }); + // Project Manager functions + function openProjectManager() { + document.getElementById('projectManagerModal').classList.add('active'); + renderProjectList(); + } + function closeProjectManager() { + document.getElementById('projectManagerModal').classList.remove('active'); + } + function renderProjectList() { + const list = document.getElementById('pmProjectList'); + const projects = config.projects || []; + list.innerHTML = projects.map(p => ` +
+
+ ${escapeHtml(p.name)} + +
+ `).join(''); + } + async function addProject() { + const name = document.getElementById('pmNewName').value.trim(); + if (!name) return; + const color = document.getElementById('pmNewColor').value; + const description = document.getElementById('pmNewDesc').value.trim(); + await fetch(`${API}/api/projects`, { + method: 'POST', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({ name, color, description }) + }); + document.getElementById('pmNewName').value = ''; + document.getElementById('pmNewDesc').value = ''; + await refreshProjects(); + } + async function deleteProject(id, name) { + if (!confirm(`Delete project "${name}"? Tasks will NOT be deleted.`)) return; + await fetch(`${API}/api/projects/${id}`, { method: 'DELETE' }); + await refreshProjects(); + } + async function refreshProjects() { + const cfg = await fetch(`${API}/api/config`).then(r => r.json()); + config.projects = cfg.projects; + populateProjectSwitcher(); + renderProjectList(); + renderBoard(); + } + // Start init(); connectWebSocket();