diff --git a/apps/auth-worker/src/index.ts b/apps/auth-worker/src/index.ts index 81ebac6..5b87b23 100644 --- a/apps/auth-worker/src/index.ts +++ b/apps/auth-worker/src/index.ts @@ -1,12 +1,17 @@ import { Hono } from "hono"; import { cors } from "hono/cors"; -import type { DeviceAuthorizationResponse, TokenResponse } from "./types.ts"; +import type { DeviceAuthorizationResponse, MagicAuthResponse, TokenResponse } from "./types.ts"; type Bindings = { WORKOS_API_KEY: string; WORKOS_CLIENT_ID: string; + MAGIC_AUTH_LIMITER: RateLimit; }; +interface RateLimit { + limit: (options: { key: string }) => Promise<{ success: boolean }>; +} + const app = new Hono<{ Bindings: Bindings }>(); app.use( @@ -100,6 +105,95 @@ app.post("/auth/device/token", async (c) => { }); }); +// Magic Auth - send verification code via email +app.post("/auth/magic", async (c) => { + const body = await c.req.json<{ email: string }>(); + + if (!body.email) { + return c.json({ error: "missing_email", message: "Email is required" }, 400); + } + + // Rate limit by IP + const ip = c.req.header("cf-connecting-ip") || "unknown"; + const { success: withinLimit } = await c.env.MAGIC_AUTH_LIMITER.limit({ key: ip }); + if (!withinLimit) { + return c.json({ error: "rate_limited", message: "Too many requests. Try again later." 
}, 429); + } + + const workosResponse = await fetch("https://api.workos.com/user_management/magic_auth", { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${c.env.WORKOS_API_KEY}`, + }, + body: JSON.stringify({ email: body.email }), + }); + + if (!workosResponse.ok) { + const error = await workosResponse.json(); + return c.json({ error: "workos_error", details: error }, 500); + } + + const data = (await workosResponse.json()) as MagicAuthResponse; + + // Return ONLY the id and email — NOT the code + return c.json({ + id: data.id, + email: data.email, + }); +}); + +// Magic Auth - verify code and exchange for tokens +app.post("/auth/magic/verify", async (c) => { + const body = await c.req.json<{ email: string; code: string }>(); + + if (!body.email || !body.code) { + return c.json({ error: "missing_fields", message: "Email and code are required" }, 400); + } + + const workosResponse = await fetch("https://api.workos.com/user_management/authenticate", { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${c.env.WORKOS_API_KEY}`, + }, + body: JSON.stringify({ + client_id: c.env.WORKOS_CLIENT_ID, + client_secret: c.env.WORKOS_API_KEY, + grant_type: "urn:workos:oauth:grant-type:magic-auth:code", + code: body.code, + email: body.email, + }), + }); + + const data = await workosResponse.json(); + + if (data.error === "invalid_grant") { + return c.json({ error: "invalid_code", message: "Invalid or expired code" }, 400); + } + + if (data.error === "expired_token" || data.error === "code_expired") { + return c.json({ error: "expired", message: "Code expired. Please request a new one." 
}, 410); + } + + if (data.error) { + return c.json({ error: data.error, message: data.error_description }, 400); + } + + const tokenData = data as TokenResponse; + return c.json({ + access_token: tokenData.access_token, + refresh_token: tokenData.refresh_token, + expires_in: tokenData.expires_in, + user: { + id: tokenData.user.id, + email: tokenData.user.email, + first_name: tokenData.user.first_name, + last_name: tokenData.user.last_name, + }, + }); +}); + // Token Refresh app.post("/auth/refresh", async (c) => { const body = await c.req.json<{ refresh_token: string }>(); diff --git a/apps/auth-worker/src/types.ts b/apps/auth-worker/src/types.ts index ec59c75..95ad2e8 100644 --- a/apps/auth-worker/src/types.ts +++ b/apps/auth-worker/src/types.ts @@ -29,6 +29,16 @@ export interface AuthorizationPendingResponse { error_description: string; } +export interface MagicAuthResponse { + id: string; + user_id: string; + email: string; + expires_at: string; + code: string; // present in WorkOS response but we NEVER return this + created_at: string; + updated_at: string; +} + export interface WorkOSErrorResponse { error: string; error_description: string; diff --git a/apps/auth-worker/wrangler.toml b/apps/auth-worker/wrangler.toml index fc0dcbf..0596cbc 100644 --- a/apps/auth-worker/wrangler.toml +++ b/apps/auth-worker/wrangler.toml @@ -11,3 +11,9 @@ WORKOS_CLIENT_ID = "client_01KD3D0QAQF4YXP4ZS8DX00CW6" # Secrets (set via `wrangler secret put`): # - WORKOS_API_KEY + +[[unsafe.bindings]] +name = "MAGIC_AUTH_LIMITER" +type = "ratelimit" +namespace_id = "1001" +simple = { limit = 5, period = 60 } diff --git a/apps/cli/src/commands/login.ts b/apps/cli/src/commands/login.ts index 2515011..b55a8df 100644 --- a/apps/cli/src/commands/login.ts +++ b/apps/cli/src/commands/login.ts @@ -1,11 +1,50 @@ import { type LoginFlowOptions, runLoginFlow } from "../lib/auth/login-flow.ts"; +import { error, info } from "../lib/output.ts"; interface LoginOptions { /** Skip the initial "Logging 
in..." message (used when called from auto-login) */ silent?: boolean; + /** Email address for magic auth flow (headless login) */ + email?: string; + /** 6-digit verification code (skip send step, verify directly) */ + code?: string; } export default async function login(options: LoginOptions = {}): Promise { + const email = options.email; + + // --code without --email is a mistake + if (options.code && !email) { + error("--code requires --email"); + info("Usage: jack login --email you@example.com --code 123456"); + process.exit(1); + } + + if (email) { + const { runMagicAuthFlow } = await import("../lib/auth/login-flow.ts"); + const result = await runMagicAuthFlow({ + email, + code: options.code, + silent: options.silent, + }); + if (!result.success) process.exit(1); + + // Print token to stdout so agents can capture it programmatically + if (result.token && (!process.stdout.isTTY || process.env.CI)) { + process.stdout.write(`${result.token}\n`); + } + return; + } + + // TTY guardrail: fail fast if no browser possible (CI counts as non-interactive) + if (!process.stdout.isTTY || process.env.CI) { + error("Cannot open browser in this environment."); + info("Use: jack login --email you@example.com"); + info("Or set JACK_API_TOKEN for headless use."); + process.exit(1); + } + + // Existing device flow const flowOptions: LoginFlowOptions = { silent: options.silent, }; diff --git a/apps/cli/src/index.ts b/apps/cli/src/index.ts index c52fce0..baaa6e3 100755 --- a/apps/cli/src/index.ts +++ b/apps/cli/src/index.ts @@ -193,6 +193,12 @@ const cli = meow( to: { type: "string", }, + email: { + type: "string", + }, + code: { + type: "string", + }, }, }, ); @@ -485,7 +491,13 @@ try { } case "login": { const { default: login } = await import("./commands/login.ts"); - await withTelemetry("login", login)(); + await withTelemetry( + "login", + login, + )({ + email: cli.flags.email || process.env.JACK_EMAIL, + code: cli.flags.code, + }); break; } case "logout": { diff --git 
a/apps/cli/src/lib/auth/client.ts b/apps/cli/src/lib/auth/client.ts index f1f9db4..3e6f5c4 100644 --- a/apps/cli/src/lib/auth/client.ts +++ b/apps/cli/src/lib/auth/client.ts @@ -58,6 +58,41 @@ export async function pollDeviceToken(deviceCode: string): Promise; } +export interface MagicAuthStartResponse { + id: string; + email: string; +} + +export async function startMagicAuth(email: string): Promise { + const response = await fetch(`${getAuthApiUrl()}/auth/magic`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ email }), + }); + + if (!response.ok) { + const errorBody = (await response.json().catch(() => ({}))) as { message?: string }; + throw new Error(errorBody.message || "Failed to send verification code"); + } + + return response.json() as Promise; +} + +export async function verifyMagicAuth(email: string, code: string): Promise { + const response = await fetch(`${getAuthApiUrl()}/auth/magic/verify`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ email, code }), + }); + + if (!response.ok) { + const errorBody = (await response.json().catch(() => ({}))) as { message?: string }; + throw new Error(errorBody.message || "Invalid or expired code"); + } + + return response.json() as Promise; +} + export async function refreshToken(refreshTokenValue: string): Promise { const response = await fetch(`${getAuthApiUrl()}/auth/refresh`, { method: "POST", diff --git a/apps/cli/src/lib/auth/ensure-auth.test.ts b/apps/cli/src/lib/auth/ensure-auth.test.ts index 82c6aa2..e1a02f2 100644 --- a/apps/cli/src/lib/auth/ensure-auth.test.ts +++ b/apps/cli/src/lib/auth/ensure-auth.test.ts @@ -242,7 +242,7 @@ describe("ensureAuthForCreate", () => { mockHasWrangler.mockResolvedValue(false); await expect(ensureAuthForCreate({ interactive: false })).rejects.toThrow( - "Not logged in and wrangler not authenticated", + "Not logged in. 
Run 'jack login --email ' or set JACK_API_TOKEN for headless use.", ); }); diff --git a/apps/cli/src/lib/auth/ensure-auth.ts b/apps/cli/src/lib/auth/ensure-auth.ts index 8c9c430..1bb5b19 100644 --- a/apps/cli/src/lib/auth/ensure-auth.ts +++ b/apps/cli/src/lib/auth/ensure-auth.ts @@ -143,7 +143,7 @@ export async function ensureAuthForCreate( if (!interactive) { // Non-interactive and no auth available - this is an error condition throw new Error( - "Not logged in and wrangler not authenticated. Run 'jack login' or 'wrangler login' first.", + "Not logged in. Run 'jack login --email ' or set JACK_API_TOKEN for headless use.", ); } diff --git a/apps/cli/src/lib/auth/guard.ts b/apps/cli/src/lib/auth/guard.ts index 80a812b..a1c7a6f 100644 --- a/apps/cli/src/lib/auth/guard.ts +++ b/apps/cli/src/lib/auth/guard.ts @@ -13,7 +13,7 @@ export async function requireAuth(): Promise { throw new JackError( JackErrorCode.AUTH_FAILED, "Not logged in", - "Run 'jack login' to sign in, or set JACK_API_TOKEN for headless use", + "Run 'jack login' to sign in, 'jack login --email ' for headless, or set JACK_API_TOKEN", ); } diff --git a/apps/cli/src/lib/auth/index.ts b/apps/cli/src/lib/auth/index.ts index 35d0843..c19ba30 100644 --- a/apps/cli/src/lib/auth/index.ts +++ b/apps/cli/src/lib/auth/index.ts @@ -5,6 +5,9 @@ export { pollDeviceToken, refreshToken, startDeviceAuth, + startMagicAuth, + verifyMagicAuth, + type MagicAuthStartResponse, } from "./client.ts"; export { ensureAuthForCreate, @@ -14,8 +17,11 @@ export { export { requireAuth, requireAuthOrLogin, getCurrentUser } from "./guard.ts"; export { runLoginFlow, + runMagicAuthFlow, type LoginFlowOptions, type LoginFlowResult, + type MagicAuthFlowOptions, + type MagicAuthFlowResult, } from "./login-flow.ts"; export { deleteCredentials, diff --git a/apps/cli/src/lib/auth/login-flow.ts b/apps/cli/src/lib/auth/login-flow.ts index 2cd94d9..5c5a09c 100644 --- a/apps/cli/src/lib/auth/login-flow.ts +++ b/apps/cli/src/lib/auth/login-flow.ts @@ 
-13,7 +13,14 @@ import { isCancel } from "../hooks.ts"; import { promptSelect } from "../hooks.ts"; import { celebrate, error, info, spinner, success, warn } from "../output.ts"; import { identifyUser } from "../telemetry.ts"; -import { type DeviceAuthResponse, pollDeviceToken, startDeviceAuth } from "./client.ts"; +import { + type DeviceAuthResponse, + type MagicAuthStartResponse, + pollDeviceToken, + startDeviceAuth, + startMagicAuth, + verifyMagicAuth, +} from "./client.ts"; import { type AuthCredentials, saveCredentials } from "./store.ts"; export interface LoginFlowOptions { @@ -145,6 +152,163 @@ export async function runLoginFlow(options?: LoginFlowOptions): Promise { + const { email } = options; + let code = options.code; + + // If no code provided, send the magic auth email first + if (!code) { + if (!options.silent) { + info(`Sending verification code to ${email}...`); + console.error(""); + } + + const sendSpin = spinner("Sending code..."); + try { + await startMagicAuth(email); + sendSpin.stop(); + } catch (err) { + sendSpin.stop(); + error(err instanceof Error ? 
err.message : "Failed to send verification code"); + return { success: false }; + } + + info("Check your email for a 6-digit code."); + + const isInteractive = process.stdout.isTTY && !process.env.CI; + + // Interactive: prompt inline so human-supervised agents can paste the code + if (isInteractive) { + console.error(""); + const codeInput = await text({ + message: "Enter code:", + validate: (value) => { + if (!value || value.trim().length === 0) return "Code is required"; + if (!/^\d{6}$/.test(value.trim())) return "Enter the 6-digit code from your email"; + }, + }); + + if (isCancel(codeInput)) { + warn("Login cancelled."); + return { success: false }; + } + code = codeInput.trim(); + } else { + // Non-TTY: exit so agent can re-run with --code + console.error(""); + info("Then run:"); + info(` jack login --email ${email} --code `); + return { success: true, codeSent: true }; + } + } + + // Verify code and complete login + const verifySpin = spinner("Verifying code..."); + + try { + const tokens = await verifyMagicAuth(email, code); + verifySpin.stop(); + + // Save credentials (needed for authFetch in subsequent calls) + const expiresIn = tokens.expires_in ?? 
300; + const creds: AuthCredentials = { + access_token: tokens.access_token, + refresh_token: tokens.refresh_token, + expires_at: Math.floor(Date.now() / 1000) + expiresIn, + user: tokens.user, + }; + await saveCredentials(creds); + + // Register user in control plane + try { + await registerUser({ + email: tokens.user.email, + first_name: tokens.user.first_name, + last_name: tokens.user.last_name, + }); + } catch (_regError) { + error("Failed to complete login - could not reach jack cloud."); + error("Please check your internet connection and try again."); + return { success: false }; + } + + // Link user identity for analytics + await identifyUser(tokens.user.id, { email: tokens.user.email }); + + // Create API token for headless use + let apiToken: string | undefined; + try { + const { createApiToken } = await import("../services/token-operations.ts"); + const tokenResult = await createApiToken("Magic Auth Token"); + apiToken = tokenResult.token; + } catch (_err) { + warn("Could not create API token. You can create one later with 'jack tokens create'."); + } + + // Prompt for username only when interactive + if (process.stdout.isTTY && !process.env.CI) { + await promptForUsername(tokens.user.email, tokens.user.first_name); + } + + const isInteractive = process.stdout.isTTY && !process.env.CI; + + console.error(""); + success(`Logged in as ${tokens.user.email}`); + + if (apiToken) { + if (isInteractive) { + success(`API token created: ${apiToken.slice(0, 12)}...`); + console.error(""); + info("Your token has been saved. You can also set it as:"); + info(` export JACK_API_TOKEN=${apiToken}`); + } else { + info("jack CLI is now authenticated. Future commands will use this session."); + info(`API token for other tools: ${apiToken}`); + } + } + + return { + success: true, + token: apiToken, + user: tokens.user, + }; + } catch (err) { + verifySpin.stop(); + error(err instanceof Error ? 
err.message : "Verification failed"); + return { success: false }; + } +} + function sleep(ms: number): Promise { return new Promise((resolve) => setTimeout(resolve, ms)); } diff --git a/apps/cli/src/lib/services/ask-project.ts b/apps/cli/src/lib/services/ask-project.ts new file mode 100644 index 0000000..7d95469 --- /dev/null +++ b/apps/cli/src/lib/services/ask-project.ts @@ -0,0 +1,69 @@ +import { authFetch } from "../auth/index.ts"; +import { getControlApiUrl } from "../control-plane.ts"; +import { readProjectLink } from "../project-link.ts"; + +export interface AskProjectHints { + endpoint?: string; + method?: "GET" | "POST" | "PUT" | "PATCH" | "DELETE"; + deployment_id?: string; +} + +export interface AskProjectOptions { + projectDir: string; + question: string; + hints?: AskProjectHints; +} + +export interface AskProjectEvidence { + id: string; + type: string; + source: string; + summary: string; + timestamp: string; + relation: "supports" | "conflicts" | "gap"; + meta?: Record; +} + +export interface AskProjectResult { + answer: string; + evidence: AskProjectEvidence[]; +} + +export async function askProject(options: AskProjectOptions): Promise { + const { projectDir, question, hints } = options; + const trimmedQuestion = question.trim(); + if (!trimmedQuestion) { + throw new Error("question is required"); + } + + const link = await readProjectLink(projectDir); + if (!link) { + throw new Error("Project is not linked. 
Run jack link or deploy a managed project first."); + } + + if (link.deploy_mode !== "managed") { + throw new Error("ask_project is only available for managed (Jack Cloud) projects."); + } + + const response = await authFetch(`${getControlApiUrl()}/v1/projects/${link.project_id}/ask`, { + method: "POST", + headers: { + "Content-Type": "application/json", + "X-Jack-Source": "mcp_local", + }, + body: JSON.stringify({ + question: trimmedQuestion, + hints, + }), + }); + + if (!response.ok) { + const errBody = (await response.json().catch(() => ({}))) as { + error?: string; + message?: string; + }; + throw new Error(errBody.message || `ask_project failed: ${response.status}`); + } + + return response.json() as Promise; +} diff --git a/apps/cli/src/mcp/tools/index.ts b/apps/cli/src/mcp/tools/index.ts index 0c4fadf..e1bfbc9 100644 --- a/apps/cli/src/mcp/tools/index.ts +++ b/apps/cli/src/mcp/tools/index.ts @@ -7,6 +7,7 @@ import { JackError, JackErrorCode } from "../../lib/errors.ts"; import { getDeployMode, getProjectId } from "../../lib/project-link.ts"; import { createProject, deployProject, getProjectStatus } from "../../lib/project-operations.ts"; import { listAllProjects } from "../../lib/project-resolver.ts"; +import { askProject } from "../../lib/services/ask-project.ts"; import { createCronSchedule } from "../../lib/services/cron-create.ts"; import { deleteCronSchedule } from "../../lib/services/cron-delete.ts"; import { listCronSchedules } from "../../lib/services/cron-list.ts"; @@ -323,6 +324,27 @@ const TestEndpointSchema = z.object({ .describe("Capture runtime logs during the request (managed mode only)"), }); +const AskProjectSchema = z.object({ + project_path: z + .string() + .optional() + .describe("Path to project directory (defaults to current directory)"), + question: z.string().describe("Debugging question to ask about the project"), + hints: z + .object({ + endpoint: z.string().optional().describe("Endpoint path hint, e.g. 
/api/todos"), + method: z + .enum(["GET", "POST", "PUT", "PATCH", "DELETE"]) + .optional() + .describe("HTTP method hint for endpoint checks"), + deployment_id: z + .string() + .optional() + .describe("Optional deployment ID to focus historical reasoning"), + }) + .optional(), +}); + export function registerTools(server: McpServer, _options: McpServerOptions, debug: DebugLogger) { // Register tool list handler server.setRequestHandler(ListToolsRequestSchema, async () => { @@ -898,6 +920,44 @@ export function registerTools(server: McpServer, _options: McpServerOptions, deb required: ["path"], }, }, + { + name: "ask_project", + description: + "Ask an evidence-backed debugging question about this managed Jack Cloud project. " + + "Use for runtime failures, recent change analysis, and code-to-production impact mapping.", + inputSchema: { + type: "object", + properties: { + project_path: { + type: "string", + description: "Path to project directory (defaults to current directory)", + }, + question: { + type: "string", + description: "Debugging question to ask about the project", + }, + hints: { + type: "object", + properties: { + endpoint: { + type: "string", + description: "Endpoint path hint, e.g. /api/todos", + }, + method: { + type: "string", + enum: ["GET", "POST", "PUT", "PATCH", "DELETE"], + description: "HTTP method hint for endpoint checks", + }, + deployment_id: { + type: "string", + description: "Optional deployment ID for historical reasoning", + }, + }, + }, + }, + required: ["question"], + }, + }, ], }; }); @@ -2239,6 +2299,50 @@ export function registerTools(server: McpServer, _options: McpServerOptions, deb }; } + case "ask_project": { + const args = AskProjectSchema.parse(request.params.arguments ?? {}); + const projectPath = args.project_path ?? 
process.cwd(); + + const wrappedAskProject = withTelemetry( + "ask_project", + async ( + projectDir: string, + question: string, + hints?: { + endpoint?: string; + method?: "GET" | "POST" | "PUT" | "PATCH" | "DELETE"; + deployment_id?: string; + }, + ) => { + const result = await askProject({ + projectDir, + question, + hints, + }); + + track(Events.COMMAND_COMPLETED, { + command: "ask_project", + evidence_count: result.evidence.length, + platform: "mcp", + }); + + return result; + }, + { platform: "mcp" }, + ); + + const result = await wrappedAskProject(projectPath, args.question, args.hints); + + return { + content: [ + { + type: "text", + text: JSON.stringify(formatSuccessResponse(result, startTime), null, 2), + }, + ], + }; + } + default: throw new Error(`Unknown tool: ${toolName}`); } diff --git a/apps/control-plane/migrations/0030_add_ask_project_index.sql b/apps/control-plane/migrations/0030_add_ask_project_index.sql new file mode 100644 index 0000000..48a3954 --- /dev/null +++ b/apps/control-plane/migrations/0030_add_ask_project_index.sql @@ -0,0 +1,87 @@ +-- Ask Project latest-deploy index (V1) +-- Latest snapshot only per project for fast/cheap retrieval. + +CREATE TABLE IF NOT EXISTS ask_code_index_latest ( + project_id TEXT PRIMARY KEY, + deployment_id TEXT NOT NULL, + indexed_at TEXT NOT NULL DEFAULT (datetime('now')), + parser_version TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'indexing', -- indexing | ready | failed + file_count INTEGER NOT NULL DEFAULT 0, + symbol_count INTEGER NOT NULL DEFAULT 0, + chunk_count INTEGER NOT NULL DEFAULT 0, + last_duration_ms INTEGER NOT NULL DEFAULT 0, + queue_attempts INTEGER NOT NULL DEFAULT 1, + error_message TEXT +); + +-- Immutable run records for per-deploy indexing observability. 
+CREATE TABLE IF NOT EXISTS ask_code_index_runs ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + deployment_id TEXT NOT NULL, + parser_version TEXT NOT NULL, + status TEXT NOT NULL, -- ready | failed + queue_attempts INTEGER NOT NULL DEFAULT 1, + duration_ms INTEGER NOT NULL DEFAULT 0, + file_count INTEGER NOT NULL DEFAULT 0, + symbol_count INTEGER NOT NULL DEFAULT 0, + chunk_count INTEGER NOT NULL DEFAULT 0, + error_message TEXT, + created_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE INDEX IF NOT EXISTS idx_ask_code_index_runs_project_created +ON ask_code_index_runs(project_id, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ask_code_index_runs_deployment +ON ask_code_index_runs(deployment_id, created_at DESC); + +CREATE TABLE IF NOT EXISTS ask_code_files_latest ( + project_id TEXT NOT NULL, + path TEXT NOT NULL, + language TEXT, + content_hash TEXT NOT NULL, + size_bytes INTEGER NOT NULL, + PRIMARY KEY (project_id, path) +); + +CREATE INDEX IF NOT EXISTS idx_ask_code_files_latest_project +ON ask_code_files_latest(project_id); + +CREATE TABLE IF NOT EXISTS ask_code_symbols_latest ( + project_id TEXT NOT NULL, + path TEXT NOT NULL, + symbol TEXT NOT NULL, + kind TEXT NOT NULL, + line_start INTEGER, + line_end INTEGER, + signature TEXT, + PRIMARY KEY (project_id, path, symbol, kind, line_start) +); + +CREATE INDEX IF NOT EXISTS idx_ask_code_symbols_latest_project_kind +ON ask_code_symbols_latest(project_id, kind); + +CREATE TABLE IF NOT EXISTS ask_code_chunks_latest ( + id TEXT PRIMARY KEY, + project_id TEXT NOT NULL, + path TEXT NOT NULL, + chunk_index INTEGER NOT NULL, + line_start INTEGER, + line_end INTEGER, + content TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_ask_code_chunks_latest_project +ON ask_code_chunks_latest(project_id); + +-- FTS table for lexical retrieval. Keep project_id/path/chunk metadata as unindexed columns. 
+CREATE VIRTUAL TABLE IF NOT EXISTS ask_code_chunks_latest_fts USING fts5( + project_id UNINDEXED, + path UNINDEXED, + chunk_index UNINDEXED, + line_start UNINDEXED, + line_end UNINDEXED, + content +); diff --git a/apps/control-plane/package.json b/apps/control-plane/package.json index 52ea826..96135af 100644 --- a/apps/control-plane/package.json +++ b/apps/control-plane/package.json @@ -6,7 +6,7 @@ "dev": "wrangler dev", "deploy": "wrangler deploy", "typecheck": "tsc --noEmit", - "db:migrate": "wrangler d1 execute jack-control-db --file ./migrations/0001_create_users.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0002_create_control_plane.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0003_create_projects.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0004_unique_project_slug.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0005_create_deployments.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0006_add_deleted_at.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0007_create_feedback.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0008_add_project_tags.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0009_add_username.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0010_enforce_slug_username_boundaries.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0011_add_binding_name.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0012_add_source_visibility_columns.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0013_slug_per_user_uniqueness.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0014_relax_project_slug_uniqueness.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0015_create_log_sessions.sql --remote && wrangler d1 execute jack-control-db 
--file ./migrations/0016_create_custom_domains.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0017_create_org_billing.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0018_backfill_org_billing.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0019_domain_slots.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0020_add_daimo_columns.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0021_create_credits.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0022_drop_promo_codes.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0023_add_dns_verification.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0024_create_cron_schedules.sql --remote && wrangler d1 execute jack-control-db --file ./migrations/0025_add_cron_secret.sql --remote" + "db:migrate": "wrangler d1 migrations apply jack-control-db --remote" }, "dependencies": { "@getjack/auth": "workspace:*", diff --git a/apps/control-plane/src/ask-code-index.ts b/apps/control-plane/src/ask-code-index.ts new file mode 100644 index 0000000..68d3683 --- /dev/null +++ b/apps/control-plane/src/ask-code-index.ts @@ -0,0 +1,749 @@ +import { unzipSync } from "fflate"; +import type { Bindings, Deployment } from "./types"; + +export type AskCodeSymbolKind = + | "route" + | "function" + | "class" + | "export" + | "env_binding" + | "sql_ref"; + +export interface AskCodeSymbol { + symbol: string; + kind: AskCodeSymbolKind; + lineStart: number | null; + lineEnd: number | null; + signature: string | null; +} + +export interface AskCodeChunk { + chunkIndex: number; + lineStart: number | null; + lineEnd: number | null; + content: string; +} + +export interface AskCodeParseResult { + symbols: AskCodeSymbol[]; + chunks: AskCodeChunk[]; +} + +export interface CodeIndexAdapter { + readonly id: string; + readonly version: string; + supports(path: string): boolean; + 
parse(file: { path: string; content: string }): AskCodeParseResult; +} + +export interface AskCodeIndexStatus { + projectId: string; + deploymentId: string; + indexedAt: string; + parserVersion: string; + status: "ready" | "indexing" | "failed"; + fileCount: number; + symbolCount: number; + chunkCount: number; + lastDurationMs: number; + queueAttempts: number; + errorMessage: string | null; +} + +export interface AskCodeSearchResult { + path: string; + chunkIndex: number | null; + lineStart: number | null; + lineEnd: number | null; + snippet: string; +} + +export interface AskRouteMatch { + path: string; + symbol: string; + signature: string | null; + lineStart: number | null; + lineEnd: number | null; +} + +const MAX_TEXT_FILE_BYTES = 300_000; +const CHUNK_LINES = 70; +const JS_TS_EXTENSIONS = [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs"]; + +function hasSupportedTextExtension(path: string): boolean { + const lower = path.toLowerCase(); + return ( + JS_TS_EXTENSIONS.some((ext) => lower.endsWith(ext)) || + lower.endsWith(".json") || + lower.endsWith(".md") + ); +} + +function detectLanguage(path: string): string { + const lower = path.toLowerCase(); + if (lower.endsWith(".ts") || lower.endsWith(".tsx")) return "typescript"; + if ( + lower.endsWith(".js") || + lower.endsWith(".jsx") || + lower.endsWith(".mjs") || + lower.endsWith(".cjs") + ) + return "javascript"; + if (lower.endsWith(".json")) return "json"; + if (lower.endsWith(".md")) return "markdown"; + return "text"; +} + +function tokenizeQuery(input: string): string[] { + const terms = input + .toLowerCase() + .replace(/[^a-z0-9/_-]+/g, " ") + .split(/\s+/) + .flatMap((t) => t.split("/")) + .filter((t) => t.length >= 2 && !/^\d+$/.test(t)); + return Array.from(new Set(terms)).slice(0, 10); +} + +function toFtsQuery(input: string): string | null { + const tokens = tokenizeQuery(input); + if (tokens.length === 0) return null; + return tokens.map((token) => `"${token.replace(/"/g, "")}"`).join(" OR "); +} + 
+function buildChunks(content: string): AskCodeChunk[] { + const lines = content.split("\n"); + const chunks: AskCodeChunk[] = []; + + let chunkIndex = 0; + for (let i = 0; i < lines.length; i += CHUNK_LINES) { + const slice = lines.slice(i, i + CHUNK_LINES); + const text = slice.join("\n").trim(); + if (!text) continue; + chunks.push({ + chunkIndex, + lineStart: i + 1, + lineEnd: i + slice.length, + content: text, + }); + chunkIndex += 1; + } + return chunks; +} + +const jsTsAdapter: CodeIndexAdapter = { + id: "js_ts", + version: "v1", + supports(path: string): boolean { + return JS_TS_EXTENSIONS.some((ext) => path.toLowerCase().endsWith(ext)); + }, + parse(file: { path: string; content: string }): AskCodeParseResult { + const lines = file.content.split("\n"); + const symbols: AskCodeSymbol[] = []; + + for (let i = 0; i < lines.length; i += 1) { + const line = lines[i] ?? ""; + const lineNo = i + 1; + + // Route patterns: app.get("/x"), router.post("/x"), etc. + const routeMatch = line.match( + /\b(?:app|router)\.(get|post|put|patch|delete|all)\s*\(\s*["'`]([^"'`]+)["'`]/i, + ); + if (routeMatch) { + const method = routeMatch[1]?.toUpperCase() ?? "GET"; + const routePath = routeMatch[2] ?? ""; + symbols.push({ + symbol: `${method} ${routePath}`, + kind: "route", + lineStart: lineNo, + lineEnd: lineNo, + signature: `${method} ${routePath}`, + }); + } + + // Basic pathname checks: pathname === "/x" + const pathnameMatch = line.match(/\bpathname\s*={2,3}\s*["'`]([^"'`]+)["'`]/i); + if (pathnameMatch) { + const routePath = pathnameMatch[1] ?? ""; + symbols.push({ + symbol: `ROUTE ${routePath}`, + kind: "route", + lineStart: lineNo, + lineEnd: lineNo, + signature: `ROUTE ${routePath}`, + }); + } + + // Function declarations + const fnMatch = line.match(/\bfunction\s+([A-Za-z0-9_]+)\s*\(/); + if (fnMatch) { + symbols.push({ + symbol: fnMatch[1] ?? 
"function", + kind: "function", + lineStart: lineNo, + lineEnd: lineNo, + signature: line.trim().slice(0, 240), + }); + } + + // Class declarations + const classMatch = line.match(/\bclass\s+([A-Za-z0-9_]+)/); + if (classMatch) { + symbols.push({ + symbol: classMatch[1] ?? "class", + kind: "class", + lineStart: lineNo, + lineEnd: lineNo, + signature: line.trim().slice(0, 240), + }); + } + + // Export declarations + if (/\bexport\b/.test(line)) { + symbols.push({ + symbol: line.trim().slice(0, 120), + kind: "export", + lineStart: lineNo, + lineEnd: lineNo, + signature: line.trim().slice(0, 240), + }); + } + + // env bindings + const envRegex = /\benv\.([A-Z_][A-Z0-9_]*)\b/g; + for (const match of line.matchAll(envRegex)) { + const binding = match[1]; + if (!binding) continue; + symbols.push({ + symbol: binding, + kind: "env_binding", + lineStart: lineNo, + lineEnd: lineNo, + signature: `env.${binding}`, + }); + } + + // SQL refs + if (/\b(SELECT|INSERT|UPDATE|DELETE|CREATE TABLE|ALTER TABLE|DROP TABLE)\b/i.test(line)) { + symbols.push({ + symbol: line.trim().slice(0, 120), + kind: "sql_ref", + lineStart: lineNo, + lineEnd: lineNo, + signature: line.trim().slice(0, 240), + }); + } + } + + return { + symbols, + chunks: buildChunks(file.content), + }; + }, +}; + +const adapters: CodeIndexAdapter[] = [jsTsAdapter]; + +function selectAdapter(path: string): CodeIndexAdapter | null { + for (const adapter of adapters) { + if (adapter.supports(path)) return adapter; + } + return null; +} + +function decodeText(bytes: Uint8Array): string | null { + if (bytes.byteLength === 0) return ""; + if (bytes.byteLength > MAX_TEXT_FILE_BYTES) return null; + return new TextDecoder().decode(bytes); +} + +function toIsoTimestamp(value: string | null | undefined): string { + if (!value) return new Date().toISOString(); + if (value.includes("T")) return value; + return `${value.replace(" ", "T")}Z`; +} + +function parseNullableInt(value: unknown): number | null { + if (value === null || 
value === undefined || value === "") return null; + const num = typeof value === "number" ? value : Number(value); + return Number.isFinite(num) ? num : null; +} + +function safeErrorMessage(error: unknown): string { + if (error instanceof Error) return error.message.slice(0, 500); + return String(error).slice(0, 500); +} + +async function markIndexStatus( + env: Bindings, + input: { + projectId: string; + deploymentId: string; + parserVersion: string; + status: "ready" | "indexing" | "failed"; + fileCount?: number; + symbolCount?: number; + chunkCount?: number; + lastDurationMs?: number; + queueAttempts?: number; + errorMessage?: string | null; + }, +): Promise { + await env.DB.prepare( + `INSERT INTO ask_code_index_latest + (project_id, deployment_id, indexed_at, parser_version, status, file_count, symbol_count, chunk_count, last_duration_ms, queue_attempts, error_message) + VALUES (?, ?, CURRENT_TIMESTAMP, ?, ?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(project_id) DO UPDATE SET + deployment_id = excluded.deployment_id, + indexed_at = CURRENT_TIMESTAMP, + parser_version = excluded.parser_version, + status = excluded.status, + file_count = excluded.file_count, + symbol_count = excluded.symbol_count, + chunk_count = excluded.chunk_count, + last_duration_ms = excluded.last_duration_ms, + queue_attempts = excluded.queue_attempts, + error_message = excluded.error_message`, + ) + .bind( + input.projectId, + input.deploymentId, + input.parserVersion, + input.status, + input.fileCount ?? 0, + input.symbolCount ?? 0, + input.chunkCount ?? 0, + input.lastDurationMs ?? 0, + input.queueAttempts ?? 1, + input.errorMessage ?? 
null, + ) + .run(); +} + +async function insertIndexRun( + env: Bindings, + input: { + projectId: string; + deploymentId: string; + parserVersion: string; + status: "ready" | "failed"; + queueAttempts: number; + durationMs: number; + fileCount: number; + symbolCount: number; + chunkCount: number; + errorMessage: string | null; + }, +): Promise { + await env.DB.prepare( + `INSERT INTO ask_code_index_runs + (id, project_id, deployment_id, parser_version, status, queue_attempts, duration_ms, file_count, symbol_count, chunk_count, error_message) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + ) + .bind( + `idxrun_${crypto.randomUUID()}`, + input.projectId, + input.deploymentId, + input.parserVersion, + input.status, + input.queueAttempts, + input.durationMs, + input.fileCount, + input.symbolCount, + input.chunkCount, + input.errorMessage, + ) + .run(); +} + +export async function markCodeIndexEnqueued( + env: Bindings, + input: { + projectId: string; + deploymentId: string; + queueAttempts?: number; + }, +): Promise { + await markIndexStatus(env, { + projectId: input.projectId, + deploymentId: input.deploymentId, + parserVersion: `${jsTsAdapter.id}:${jsTsAdapter.version}`, + status: "indexing", + fileCount: 0, + symbolCount: 0, + chunkCount: 0, + lastDurationMs: 0, + queueAttempts: input.queueAttempts ?? 1, + errorMessage: null, + }); +} + +export interface IndexLatestDeploymentOptions { + queueAttempts?: number; + rethrowOnFailure?: boolean; +} + +export async function indexLatestDeploymentSource( + env: Bindings, + params: { projectId: string; deployment: Deployment } & IndexLatestDeploymentOptions, +): Promise { + const { projectId, deployment } = params; + const parserVersion = `${jsTsAdapter.id}:${jsTsAdapter.version}`; + const queueAttempts = params.queueAttempts ?? 
1; + const startedAt = Date.now(); + + await markIndexStatus(env, { + projectId, + deploymentId: deployment.id, + parserVersion, + status: "indexing", + queueAttempts, + }); + + try { + if (!deployment.artifact_bucket_key) { + throw new Error("Deployment has no artifact bucket key"); + } + + const sourceKey = `${deployment.artifact_bucket_key}/source.zip`; + const sourceObj = await env.CODE_BUCKET.get(sourceKey); + if (!sourceObj) { + throw new Error("source.zip not found for deployment"); + } + + const zipData = await sourceObj.arrayBuffer(); + const files = unzipSync(new Uint8Array(zipData)); + + const fileRows: Array<{ + path: string; + language: string; + contentHash: string; + sizeBytes: number; + }> = []; + const symbolRows: Array<{ + path: string; + symbol: string; + kind: AskCodeSymbolKind; + lineStart: number | null; + lineEnd: number | null; + signature: string | null; + }> = []; + const chunkRows: Array<{ + id: string; + path: string; + chunkIndex: number; + lineStart: number | null; + lineEnd: number | null; + content: string; + }> = []; + + for (const [rawPath, bytes] of Object.entries(files)) { + const path = rawPath.startsWith("/") ? rawPath.slice(1) : rawPath; + if (!hasSupportedTextExtension(path)) continue; + + const text = decodeText(bytes); + if (text === null) continue; + + const adapter = selectAdapter(path); + const parsed = adapter + ? adapter.parse({ path, content: text }) + : ({ + symbols: [], + chunks: buildChunks(text), + } satisfies AskCodeParseResult); + + fileRows.push({ + path, + language: detectLanguage(path), + contentHash: `bytes:${bytes.byteLength}:sum:${bytes.reduce((acc, cur) => acc + cur, 0)}`, + sizeBytes: bytes.byteLength, + }); + + const seenSymbolKeys = new Set(); + for (const symbol of parsed.symbols) { + const key = [ + symbol.kind, + symbol.symbol, + symbol.lineStart ?? "", + symbol.lineEnd ?? "", + symbol.signature ?? 
"", + ].join("|"); + if (seenSymbolKeys.has(key)) continue; + seenSymbolKeys.add(key); + + symbolRows.push({ + path, + symbol: symbol.symbol.slice(0, 200), + kind: symbol.kind, + lineStart: symbol.lineStart, + lineEnd: symbol.lineEnd, + signature: symbol.signature?.slice(0, 300) ?? null, + }); + } + + for (const chunk of parsed.chunks) { + chunkRows.push({ + id: crypto.randomUUID(), + path, + chunkIndex: chunk.chunkIndex, + lineStart: chunk.lineStart, + lineEnd: chunk.lineEnd, + content: chunk.content.slice(0, 5000), + }); + } + } + + // Replace project snapshot + await env.DB.prepare("DELETE FROM ask_code_files_latest WHERE project_id = ?") + .bind(projectId) + .run(); + await env.DB.prepare("DELETE FROM ask_code_symbols_latest WHERE project_id = ?") + .bind(projectId) + .run(); + await env.DB.prepare("DELETE FROM ask_code_chunks_latest WHERE project_id = ?") + .bind(projectId) + .run(); + await env.DB.prepare("DELETE FROM ask_code_chunks_latest_fts WHERE project_id = ?") + .bind(projectId) + .run(); + + for (const row of fileRows) { + await env.DB.prepare( + `INSERT INTO ask_code_files_latest (project_id, path, language, content_hash, size_bytes) + VALUES (?, ?, ?, ?, ?)`, + ) + .bind(projectId, row.path, row.language, row.contentHash, row.sizeBytes) + .run(); + } + + for (const row of symbolRows) { + await env.DB.prepare( + `INSERT OR IGNORE INTO ask_code_symbols_latest (project_id, path, symbol, kind, line_start, line_end, signature) + VALUES (?, ?, ?, ?, ?, ?, ?)`, + ) + .bind(projectId, row.path, row.symbol, row.kind, row.lineStart, row.lineEnd, row.signature) + .run(); + } + + for (const row of chunkRows) { + await env.DB.prepare( + `INSERT INTO ask_code_chunks_latest (id, project_id, path, chunk_index, line_start, line_end, content) + VALUES (?, ?, ?, ?, ?, ?, ?)`, + ) + .bind( + `${projectId}:${row.id}`, + projectId, + row.path, + row.chunkIndex, + row.lineStart, + row.lineEnd, + row.content, + ) + .run(); + + await env.DB.prepare( + `INSERT INTO 
ask_code_chunks_latest_fts (project_id, path, chunk_index, line_start, line_end, content) + VALUES (?, ?, ?, ?, ?, ?)`, + ) + .bind(projectId, row.path, String(row.chunkIndex), row.lineStart, row.lineEnd, row.content) + .run(); + } + + const durationMs = Date.now() - startedAt; + await markIndexStatus(env, { + projectId, + deploymentId: deployment.id, + parserVersion, + status: "ready", + fileCount: fileRows.length, + symbolCount: symbolRows.length, + chunkCount: chunkRows.length, + lastDurationMs: durationMs, + queueAttempts, + errorMessage: null, + }); + await insertIndexRun(env, { + projectId, + deploymentId: deployment.id, + parserVersion, + status: "ready", + queueAttempts, + durationMs, + fileCount: fileRows.length, + symbolCount: symbolRows.length, + chunkCount: chunkRows.length, + errorMessage: null, + }); + } catch (error) { + const durationMs = Date.now() - startedAt; + const safeMessage = safeErrorMessage(error); + await markIndexStatus(env, { + projectId, + deploymentId: deployment.id, + parserVersion, + status: "failed", + lastDurationMs: durationMs, + queueAttempts, + errorMessage: safeMessage, + }); + await insertIndexRun(env, { + projectId, + deploymentId: deployment.id, + parserVersion, + status: "failed", + queueAttempts, + durationMs, + fileCount: 0, + symbolCount: 0, + chunkCount: 0, + errorMessage: safeMessage, + }); + if (params.rethrowOnFailure) { + throw error; + } + } +} + +export async function getLatestCodeIndexStatus( + env: Bindings, + projectId: string, +): Promise { + const row = await env.DB.prepare( + `SELECT project_id, deployment_id, indexed_at, parser_version, status, file_count, symbol_count, chunk_count, last_duration_ms, queue_attempts, error_message + FROM ask_code_index_latest + WHERE project_id = ?`, + ) + .bind(projectId) + .first<{ + project_id: string; + deployment_id: string; + indexed_at: string; + parser_version: string; + status: "ready" | "indexing" | "failed"; + file_count: number; + symbol_count: number; + 
chunk_count: number; + last_duration_ms: number; + queue_attempts: number; + error_message: string | null; + }>(); + + if (!row) return null; + return { + projectId: row.project_id, + deploymentId: row.deployment_id, + indexedAt: toIsoTimestamp(row.indexed_at), + parserVersion: row.parser_version, + status: row.status, + fileCount: row.file_count, + symbolCount: row.symbol_count, + chunkCount: row.chunk_count, + lastDurationMs: row.last_duration_ms, + queueAttempts: row.queue_attempts, + errorMessage: row.error_message, + }; +} + +export async function searchLatestCodeIndex( + env: Bindings, + projectId: string, + query: string, + limit = 6, +): Promise { + const ftsQuery = toFtsQuery(query); + if (!ftsQuery) return []; + + const result = await env.DB.prepare( + `SELECT path, chunk_index, line_start, line_end, substr(content, 1, 280) as snippet + FROM ask_code_chunks_latest_fts + WHERE ask_code_chunks_latest_fts MATCH ? AND project_id = ? + LIMIT ?`, + ) + .bind(ftsQuery, projectId, limit) + .all<{ + path: string; + chunk_index: string | number | null; + line_start: string | number | null; + line_end: string | number | null; + snippet: string; + }>(); + + return (result.results ?? []).map((row) => ({ + path: row.path, + chunkIndex: parseNullableInt(row.chunk_index), + lineStart: parseNullableInt(row.line_start), + lineEnd: parseNullableInt(row.line_end), + snippet: row.snippet, + })); +} + +export async function findRouteMatchesForEndpoint( + env: Bindings, + projectId: string, + endpointPath: string, + limit = 5, +): Promise { + const pattern = `%${endpointPath.toLowerCase()}%`; + const result = await env.DB.prepare( + `SELECT path, symbol, signature, line_start, line_end + FROM ask_code_symbols_latest + WHERE project_id = ? AND kind = 'route' AND lower(coalesce(signature, '')) LIKE ? 
+ ORDER BY path ASC, line_start ASC + LIMIT ?`, + ) + .bind(projectId, pattern, limit) + .all<{ + path: string; + symbol: string; + signature: string | null; + line_start: number | null; + line_end: number | null; + }>(); + + return (result.results ?? []).map((row) => ({ + path: row.path, + symbol: row.symbol, + signature: row.signature, + lineStart: row.line_start ?? null, + lineEnd: row.line_end ?? null, + })); +} + +export async function searchSourceFallback( + env: Bindings, + deployment: Deployment, + query: string, + limit = 4, +): Promise { + if (!deployment.artifact_bucket_key) return []; + const tokens = tokenizeQuery(query); + if (tokens.length === 0) return []; + + const sourceObj = await env.CODE_BUCKET.get(`${deployment.artifact_bucket_key}/source.zip`); + if (!sourceObj) return []; + + const zipData = await sourceObj.arrayBuffer(); + const files = unzipSync(new Uint8Array(zipData)); + + const results: AskCodeSearchResult[] = []; + for (const [path, bytes] of Object.entries(files)) { + if (!hasSupportedTextExtension(path)) continue; + const text = decodeText(bytes); + if (!text) continue; + + const lower = text.toLowerCase(); + const matched = tokens.some((token) => lower.includes(token)); + if (!matched) continue; + + results.push({ + path, + chunkIndex: null, + lineStart: null, + lineEnd: null, + snippet: text.slice(0, 280), + }); + + if (results.length >= limit) break; + } + + return results; +} diff --git a/apps/control-plane/src/ask-index-queue.ts b/apps/control-plane/src/ask-index-queue.ts new file mode 100644 index 0000000..fa4dbf5 --- /dev/null +++ b/apps/control-plane/src/ask-index-queue.ts @@ -0,0 +1,125 @@ +import { indexLatestDeploymentSource, markCodeIndexEnqueued } from "./ask-code-index"; +import type { Bindings, Deployment } from "./types"; + +export interface AskIndexQueueMessage { + version: 1; + projectId: string; + deploymentId: string; + enqueuedAt: string; + reason: "deploy" | "rollback"; +} + +const MAX_ATTEMPTS = 4; +const 
RETRY_BACKOFF_SECONDS = [10, 45, 180, 600]; + +function isPermanentIndexError(message: string): boolean { + const lower = message.toLowerCase(); + return ( + lower.includes("has no artifact bucket key") || + lower.includes("source.zip not found") || + lower.includes("invalid message body shape") + ); +} + +function isAskIndexQueueMessage(value: unknown): value is AskIndexQueueMessage { + if (!value || typeof value !== "object") return false; + const record = value as Record; + return ( + record.version === 1 && + typeof record.projectId === "string" && + record.projectId.length > 0 && + typeof record.deploymentId === "string" && + record.deploymentId.length > 0 && + typeof record.enqueuedAt === "string" && + (record.reason === "deploy" || record.reason === "rollback") + ); +} + +function parseQueueMessage(raw: unknown): AskIndexQueueMessage | null { + if (!isAskIndexQueueMessage(raw)) return null; + return raw; +} + +function retryDelayForAttempt(attempts: number): number { + const index = Math.max(0, Math.min(RETRY_BACKOFF_SECONDS.length - 1, attempts - 1)); + return RETRY_BACKOFF_SECONDS[index] ?? 60; +} + +async function getDeploymentForIndexMessage( + env: Bindings, + message: AskIndexQueueMessage, +): Promise { + return env.DB.prepare("SELECT * FROM deployments WHERE id = ? 
AND project_id = ?") + .bind(message.deploymentId, message.projectId) + .first(); +} + +export async function enqueueAskIndexJob( + env: Bindings, + message: AskIndexQueueMessage, +): Promise { + if (!env.ASK_INDEX_QUEUE) { + throw new Error("ASK_INDEX_QUEUE binding is not configured"); + } + + await env.ASK_INDEX_QUEUE.send(message, { contentType: "json" }); + await markCodeIndexEnqueued(env, { + projectId: message.projectId, + deploymentId: message.deploymentId, + queueAttempts: 1, + }); +} + +export async function consumeAskIndexBatch( + batch: MessageBatch, + env: Bindings, +): Promise { + for (const message of batch.messages) { + const parsed = parseQueueMessage(message.body); + if (!parsed) { + console.error("ask_project index queue: invalid message body shape"); + message.ack(); + continue; + } + + try { + const deployment = await getDeploymentForIndexMessage(env, parsed); + if (!deployment) { + console.warn( + `ask_project index queue: deployment ${parsed.deploymentId} not found for project ${parsed.projectId}`, + ); + message.ack(); + continue; + } + + if (deployment.status !== "live") { + console.info( + `ask_project index queue: skip deployment ${deployment.id} because status=${deployment.status}`, + ); + message.ack(); + continue; + } + + await indexLatestDeploymentSource(env, { + projectId: parsed.projectId, + deployment, + queueAttempts: message.attempts, + rethrowOnFailure: true, + }); + message.ack(); + } catch (error) { + const messageText = error instanceof Error ? 
error.message : String(error); + const shouldRetry = message.attempts < MAX_ATTEMPTS && !isPermanentIndexError(messageText); + console.error( + `ask_project index queue: job failed for deployment ${parsed.deploymentId} attempt=${message.attempts}: ${messageText}`, + ); + if (shouldRetry) { + message.retry({ + delaySeconds: retryDelayForAttempt(message.attempts), + }); + } else { + message.ack(); + } + } + } +} diff --git a/apps/control-plane/src/ask-project.ts b/apps/control-plane/src/ask-project.ts new file mode 100644 index 0000000..4367b02 --- /dev/null +++ b/apps/control-plane/src/ask-project.ts @@ -0,0 +1,586 @@ +import { + findRouteMatchesForEndpoint, + getLatestCodeIndexStatus, + searchLatestCodeIndex, + searchSourceFallback, +} from "./ask-code-index"; +import { CloudflareClient } from "./cloudflare-api"; +import { DeploymentService } from "./deployment-service"; +import { ProvisioningService } from "./provisioning"; +import type { Bindings, Deployment } from "./types"; + +export interface AskProjectHints { + endpoint?: string; + method?: "GET" | "POST" | "PUT" | "PATCH" | "DELETE"; + deployment_id?: string; +} + +export interface AskProjectRequest { + question: string; + hints?: AskProjectHints; +} + +export interface AskProjectEvidence { + id: string; + type: + | "endpoint_test" + | "log_event" + | "sql_result" + | "deployment_event" + | "env_snapshot" + | "code_chunk" + | "code_symbol" + | "index_status"; + source: string; + summary: string; + timestamp: string; + relation: "supports" | "conflicts" | "gap"; + meta?: Record; +} + +export interface AskProjectResponse { + answer: string; + evidence: AskProjectEvidence[]; +} + +interface AskProjectInput { + env: Bindings; + project: { + id: string; + slug: string; + owner_username: string | null; + }; + question: string; + hints?: AskProjectHints; +} + +interface EndpointCheckResult { + status: number; + durationMs: number; + bodyExcerpt: string; +} + +function toIsoTimestamp(value?: string | null): string { 
+ if (!value) return new Date().toISOString(); + if (value.includes("T")) return value; + return `${value.replace(" ", "T")}Z`; +} + +function redactText(input: string): string { + return input + .replace(/\b(sk|pk|rk|jkt)_[A-Za-z0-9_-]+\b/g, "[redacted-token]") + .replace(/\b(password|secret|token|api[_-]?key)\b\s*[:=]\s*["'][^"']+["']/gi, "$1=[redacted]"); +} + +function isWhyShippedQuestion(question: string): boolean { + const q = question.toLowerCase(); + return q.includes("why did we ship") || q.includes("why we shipped") || q.includes("why shipped"); +} + +function isChangeQuestion(question: string): boolean { + const q = question.toLowerCase(); + return q.includes("what changed") || q.includes("recently") || q.includes("caused this"); +} + +function extractEndpointFromQuestion(question: string): string | null { + const match = question.match(/(\/[a-zA-Z0-9._~:/?#\[\]@!$&'()*+,;=-]+)/); + return match?.[1] ?? null; +} + +function looksLikeFailureQuestion(question: string): boolean { + const q = question.toLowerCase(); + return ( + q.includes("500") || + q.includes("error") || + q.includes("broken") || + q.includes("fail") || + q.includes("not working") + ); +} + +function tokenizeQuestion(input: string): string[] { + const stopwords = new Set([ + "why", + "did", + "we", + "ship", + "shipped", + "how", + "what", + "the", + "this", + "that", + "with", + "from", + "into", + "for", + "and", + "our", + "your", + "was", + "were", + "is", + "are", + ]); + + return input + .toLowerCase() + .replace(/[^a-z0-9/_-]+/g, " ") + .split(/\s+/) + .filter((token) => token.length >= 3 && !stopwords.has(token)); +} + +function inferDeploymentFromQuestion( + question: string, + deployments: Deployment[], +): Deployment | null { + const tokens = tokenizeQuestion(question); + if (tokens.length === 0) return null; + + let best: { deployment: Deployment; score: number } | null = null; + for (const deployment of deployments) { + const message = deployment.message?.toLowerCase(); + if 
(!message) continue; + + let score = 0; + for (const token of tokens) { + if (message.includes(token)) score += 1; + } + if (score === 0) continue; + + if (!best || score > best.score) { + best = { deployment, score }; + } + } + + return best?.deployment ?? null; +} + +class EvidenceCollector { + private index = 1; + private readonly list: AskProjectEvidence[] = []; + + add( + type: AskProjectEvidence["type"], + source: string, + summary: string, + relation: AskProjectEvidence["relation"], + meta?: Record, + timestamp?: string, + ): void { + this.list.push({ + id: `ev_${String(this.index).padStart(3, "0")}`, + type, + source, + summary: redactText(summary).slice(0, 500), + relation, + timestamp: timestamp ?? new Date().toISOString(), + meta, + }); + this.index += 1; + } + + values(): AskProjectEvidence[] { + return this.list; + } +} + +async function runEndpointCheck( + baseUrl: string, + path: string, + method: "GET" | "POST" | "PUT" | "PATCH" | "DELETE", +): Promise { + const normalized = path.startsWith("/") ? 
path : `/${path}`; + const url = new URL(normalized, baseUrl); + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), 8000); + const startedAt = Date.now(); + try { + const response = await fetch(url.toString(), { + method, + redirect: "follow", + signal: controller.signal, + }); + const body = (await response.text()).slice(0, 600); + return { + status: response.status, + durationMs: Date.now() - startedAt, + bodyExcerpt: body, + }; + } finally { + clearTimeout(timeout); + } +} + +function summarizeDeploymentMessage(deployment: Deployment): string { + const createdAt = toIsoTimestamp(deployment.created_at); + if (deployment.message) { + return `Deployment ${deployment.id} (${deployment.status}) at ${createdAt}: ${deployment.message}`; + } + return `Deployment ${deployment.id} (${deployment.status}) at ${createdAt} has no deploy message`; +} + +function pickAnswer(params: { + question: string; + latestDeployment: Deployment | null; + endpointPath: string | null; + endpointCheck: EndpointCheckResult | null; + missingTableName: string | null; + tableMissingConfirmed: boolean; + routeMatches: Array<{ path: string; signature: string | null }>; + deployMessage: string | null; + hasInsufficientEvidence: boolean; +}): string { + const { + question, + latestDeployment, + endpointPath, + endpointCheck, + missingTableName, + tableMissingConfirmed, + routeMatches, + deployMessage, + hasInsufficientEvidence, + } = params; + + if (isWhyShippedQuestion(question)) { + if (deployMessage) { + return `The latest deployment appears to have been shipped for: "${deployMessage}".`; + } + return "I can't determine why it was shipped from deployment metadata because the deploy message is missing."; + } + + if (tableMissingConfirmed && missingTableName && endpointPath) { + return `The likely root cause is a missing D1 table "${missingTableName}" for endpoint ${endpointPath}.`; + } + + if (endpointCheck && endpointPath) { + if (endpointCheck.status 
>= 500) { + if (routeMatches.length > 0) { + const files = routeMatches + .map((m) => m.path) + .filter((v, i, arr) => arr.indexOf(v) === i) + .slice(0, 3) + .join(", "); + return `Endpoint ${endpointPath} is returning ${endpointCheck.status}. Likely related route code is in: ${files}.`; + } + return `Endpoint ${endpointPath} is returning ${endpointCheck.status}. I can confirm a runtime failure but route mapping evidence is limited.`; + } + if ( + endpointCheck.status >= 200 && + endpointCheck.status < 500 && + looksLikeFailureQuestion(question) + ) { + return `I could not reproduce a server failure for ${endpointPath}; current status is ${endpointCheck.status}.`; + } + } + + if (isChangeQuestion(question) && latestDeployment) { + if (latestDeployment.message) { + return `The most recent change is deployment ${latestDeployment.id} with message: "${latestDeployment.message}".`; + } + return `Deployment ${latestDeployment.id} is the most recent change, but it has no deploy message.`; + } + + if (hasInsufficientEvidence) { + return "I can’t determine this confidently with the current evidence. 
See gaps in evidence for what’s missing."; + } + + return "Based on current evidence, I can provide partial debugging context but not a high-confidence root cause yet."; +} + +export async function answerProjectQuestion(input: AskProjectInput): Promise { + const { env, project, question, hints } = input; + const evidence = new EvidenceCollector(); + const deploymentService = new DeploymentService(env); + const provisioning = new ProvisioningService(env); + + const latestDeployment = await deploymentService.getLatestDeployment(project.id); + const deployments = (await deploymentService.listDeployments(project.id)).slice(0, 10); + + if (!latestDeployment) { + evidence.add( + "deployment_event", + "deployments", + "No live deployment found for this project.", + "gap", + ); + return { + answer: "I can't answer this because there is no live deployment for this project yet.", + evidence: evidence.values(), + }; + } + + const hintedDeploymentId = hints?.deployment_id?.trim(); + const hintedDeployment = hintedDeploymentId + ? deployments.find((d) => d.id === hintedDeploymentId || d.id.endsWith(hintedDeploymentId)) + : undefined; + const inferredDeployment = + !hintedDeployment && isWhyShippedQuestion(question) + ? inferDeploymentFromQuestion(question, deployments) + : null; + const targetDeployment = hintedDeployment ?? inferredDeployment ?? 
latestDeployment; + + evidence.add( + "deployment_event", + "deployments", + summarizeDeploymentMessage(targetDeployment), + "supports", + { + deployment_id: targetDeployment.id, + status: targetDeployment.status, + source: targetDeployment.source, + }, + toIsoTimestamp(targetDeployment.created_at), + ); + + const historicalDeployments = deployments.filter((d) => d.id !== targetDeployment.id).slice(0, 4); + for (const deployment of historicalDeployments) { + evidence.add( + "deployment_event", + "deployments", + summarizeDeploymentMessage(deployment), + "supports", + { + deployment_id: deployment.id, + status: deployment.status, + source: deployment.source, + }, + toIsoTimestamp(deployment.created_at), + ); + } + + const resources = await provisioning.getProjectResources(project.id); + evidence.add( + "env_snapshot", + "resources", + `Project has ${resources.length} active resources.`, + "supports", + { + resource_types: resources.map((r) => r.resource_type), + }, + ); + + const latestIndex = await getLatestCodeIndexStatus(env, project.id); + if (!latestIndex) { + evidence.add("index_status", "code_index_latest", "No latest code index found yet.", "gap"); + } else if (latestIndex.status !== "ready") { + evidence.add( + "index_status", + "code_index_latest", + `Latest code index status is ${latestIndex.status}.`, + "gap", + { + deployment_id: latestIndex.deploymentId, + }, + latestIndex.indexedAt, + ); + } else if (latestIndex.deploymentId !== latestDeployment.id) { + evidence.add( + "index_status", + "code_index_latest", + `Latest code index is stale (indexed deployment ${latestIndex.deploymentId}, latest live ${latestDeployment.id}).`, + "gap", + { + indexed_deployment_id: latestIndex.deploymentId, + latest_deployment_id: latestDeployment.id, + }, + latestIndex.indexedAt, + ); + } else { + evidence.add( + "index_status", + "code_index_latest", + `Latest code index is ready for deployment ${latestDeployment.id}.`, + "supports", + { + deployment_id: 
latestIndex.deploymentId, + file_count: latestIndex.fileCount, + symbol_count: latestIndex.symbolCount, + chunk_count: latestIndex.chunkCount, + last_duration_ms: latestIndex.lastDurationMs, + queue_attempts: latestIndex.queueAttempts, + }, + latestIndex.indexedAt, + ); + } + + const endpointPath = hints?.endpoint ?? extractEndpointFromQuestion(question); + const endpointMethod = hints?.method ?? "GET"; + let liveChecks = 0; + let endpointCheck: EndpointCheckResult | null = null; + let missingTableName: string | null = null; + let tableMissingConfirmed = false; + + if (endpointPath && liveChecks < 4) { + const baseUrl = project.owner_username + ? `https://${project.owner_username}-${project.slug}.runjack.xyz` + : `https://${project.slug}.runjack.xyz`; + + try { + liveChecks += 1; + endpointCheck = await runEndpointCheck(baseUrl, endpointPath, endpointMethod); + const relation = endpointCheck.status >= 500 ? "supports" : "conflicts"; + evidence.add( + "endpoint_test", + "live_endpoint_check", + `${endpointMethod} ${endpointPath} returned ${endpointCheck.status} in ${endpointCheck.durationMs}ms.`, + relation, + { + status: endpointCheck.status, + duration_ms: endpointCheck.durationMs, + body_excerpt: redactText(endpointCheck.bodyExcerpt).slice(0, 240), + }, + ); + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + evidence.add( + "endpoint_test", + "live_endpoint_check", + `Failed to test ${endpointMethod} ${endpointPath}: ${message}`, + "gap", + ); + } + } + + if (endpointCheck?.status && endpointCheck.status >= 500 && liveChecks < 4) { + const missingTable = endpointCheck.bodyExcerpt.match(/no such table:\s*([A-Za-z0-9_]+)/i); + if (missingTable?.[1]) { + missingTableName = missingTable[1]; + const d1Resource = await env.DB.prepare( + "SELECT provider_id FROM resources WHERE project_id = ? 
AND resource_type = 'd1' AND status != 'deleted' ORDER BY created_at ASC LIMIT 1", + ) + .bind(project.id) + .first<{ provider_id: string }>(); + + if (d1Resource) { + try { + liveChecks += 1; + const cfClient = new CloudflareClient(env); + const result = await cfClient.executeD1Query( + d1Resource.provider_id, + "SELECT name FROM sqlite_master WHERE type='table' AND name = ?", + [missingTableName], + ); + const found = (result.results?.length ?? 0) > 0; + tableMissingConfirmed = !found; + evidence.add( + "sql_result", + "d1_table_check", + found + ? `Table "${missingTableName}" exists in D1.` + : `Table "${missingTableName}" does not exist in D1.`, + found ? "conflicts" : "supports", + { + table: missingTableName, + exists: found, + }, + ); + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + evidence.add( + "sql_result", + "d1_table_check", + `Failed to verify table "${missingTableName}" in D1: ${message}`, + "gap", + ); + } + } else { + evidence.add( + "sql_result", + "d1_table_check", + "Could not verify missing-table error because project has no D1 resource.", + "gap", + ); + } + } + } + + const codeQuery = endpointPath ?? question; + const routeMatches = + endpointPath && latestIndex?.status === "ready" + ? await findRouteMatchesForEndpoint(env, project.id, endpointPath, 4) + : []; + + if (routeMatches.length > 0) { + for (const match of routeMatches.slice(0, 3)) { + evidence.add( + "code_symbol", + "code_index_latest", + `Route match in ${match.path}: ${match.signature ?? match.symbol}`, + "supports", + { + path: match.path, + line_start: match.lineStart, + line_end: match.lineEnd, + }, + ); + } + } else if (endpointPath) { + evidence.add( + "code_symbol", + "code_index_latest", + `No route symbol match found for endpoint ${endpointPath}.`, + "gap", + ); + } + + let codeHits = + latestIndex?.status === "ready" + ? 
await searchLatestCodeIndex(env, project.id, codeQuery, 3) + : []; + + if (codeHits.length === 0) { + const fallback = await searchSourceFallback(env, targetDeployment, codeQuery, 3); + if (fallback.length > 0) { + codeHits = fallback; + evidence.add( + "index_status", + "source_fallback", + "Used source fallback search because latest code index had no hits.", + "gap", + ); + } + } + + for (const hit of codeHits.slice(0, 3)) { + evidence.add( + "code_chunk", + "code_search", + `Possible relevant code in ${hit.path}: ${hit.snippet}`, + "supports", + { + path: hit.path, + line_start: hit.lineStart, + line_end: hit.lineEnd, + }, + ); + } + + if (codeHits.length === 0) { + evidence.add( + "code_chunk", + "code_search", + "No relevant code chunks were found for this query.", + "gap", + ); + } + + const hasInsufficientEvidence = evidence.values().some((e) => e.relation === "gap"); + const answer = pickAnswer({ + question, + latestDeployment, + endpointPath, + endpointCheck, + missingTableName, + tableMissingConfirmed, + routeMatches, + deployMessage: targetDeployment.message, + hasInsufficientEvidence, + }); + + return { + answer, + evidence: evidence.values(), + }; +} diff --git a/apps/control-plane/src/index.ts b/apps/control-plane/src/index.ts index cbd3a29..07bdf3b 100644 --- a/apps/control-plane/src/index.ts +++ b/apps/control-plane/src/index.ts @@ -21,6 +21,13 @@ import { DaimoBillingService } from "./daimo-billing-service"; import { DeploymentService, validateManifest } from "./deployment-service"; import { getDoEnforcementStatus, processDoMetering } from "./do-metering"; import { REFERRAL_CAP, TIER_LIMITS, computeLimits } from "./entitlements-config"; +import { indexLatestDeploymentSource } from "./ask-code-index"; +import { + type AskIndexQueueMessage, + consumeAskIndexBatch, + enqueueAskIndexJob, +} from "./ask-index-queue"; +import { answerProjectQuestion, type AskProjectRequest } from "./ask-project"; import { ProvisioningService, normalizeSlug, validateSlug 
} from "./provisioning"; import { ProjectCacheService } from "./repositories/project-cache-service"; import { validateReadOnly } from "./sql-utils"; @@ -31,6 +38,7 @@ import type { CustomDomainNextStep, CustomDomainResponse, CustomDomainStatus, + Deployment, OrgBilling, PlanStatus, PlanTier, @@ -150,6 +158,172 @@ function d1DatetimeToIso(value: string): string { return `${value.replace(" ", "T")}Z`; } +const POSTHOG_CAPTURE_DEFAULT_HOST = "https://eu.i.posthog.com"; + +function getStatusBucket(status: number): string { + if (status < 300) return "2xx"; + if (status < 400) return "3xx"; + if (status < 500) return "4xx"; + return "5xx"; +} + +function inferAskSource(c: { req: { header: (name: string) => string | undefined } }): string { + const explicit = c.req.header("x-jack-source")?.trim().toLowerCase(); + if ( + explicit === "web" || + explicit === "api" || + explicit === "mcp_local" || + explicit === "mcp_remote" || + explicit === "cli" + ) { + return explicit; + } + + const userAgent = c.req.header("user-agent")?.toLowerCase() ?? ""; + if (userAgent.includes("mozilla")) return "web"; + if (userAgent.includes("curl")) return "api"; + if (userAgent.includes("jack") || userAgent.includes("bun") || userAgent.includes("node")) return "cli"; + return "api"; +} + +async function resolveOrgPlanTier(db: D1Database, orgId: string): Promise<PlanTier> { + try { + const billing = await db + .prepare("SELECT plan_tier FROM org_billing WHERE org_id = ?") + .bind(orgId) + .first<{ plan_tier: PlanTier | null }>(); + return billing?.plan_tier ?? 
"free"; + } catch { + return "free"; + } +} + +async function capturePosthogEvent( + env: Bindings, + distinctId: string, + event: string, + properties: Record<string, unknown>, +): Promise<void> { + if (!env.POSTHOG_API_KEY) return; + const host = (env.POSTHOG_HOST || POSTHOG_CAPTURE_DEFAULT_HOST).replace(/\/+$/, ""); + + try { + await fetch(`${host}/capture`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + api_key: env.POSTHOG_API_KEY, + distinct_id: distinctId, + event, + properties: { + ...properties, + $timestamp: Date.now(), + }, + }), + }); + } catch { + // Fail-open: analytics must not affect API behavior. + } +} + +type AskProjectTrackingInput = { + env: Bindings; + userId: string; + projectId: string; + orgId: string; + tier: PlanTier; + source: string; + status: number; + latencyMs: number; + evidenceCount: number; + questionLength: number; + hasEndpointHint: boolean; + hintMethod?: string; + errorCode?: string; +}; + +async function trackAskProjectRequest(input: AskProjectTrackingInput): Promise<void> { + const dataset = input.env.CONTROL_USAGE; + + if (dataset) { + try { + dataset.writeDataPoint({ + indexes: [input.projectId], + blobs: [ + input.orgId, // blob1: org_id + input.tier, // blob2: plan tier + "ask_project", // blob3: feature + input.source, // blob4: source + getStatusBucket(input.status), // blob5: status bucket + input.errorCode ?? "", // blob6: error code + input.hasEndpointHint ? "1" : "0", // blob7: endpoint hint present + input.hintMethod ?? 
"", // blob8: hint method + "", // blob9: reserved + "", // blob10: reserved + ], + doubles: [ + 1, // double1: request count + Math.max(0, Math.round(input.latencyMs)), // double2: latency ms + Math.max(0, Math.round(input.evidenceCount)), // double3: evidence count + Math.max(0, Math.round(input.questionLength)), // double4: question length + ], + }); + } catch (error) { + console.error("Failed to write ask_project usage datapoint:", error); + } + } else { + console.error("CONTROL_USAGE binding not configured; skipping ask_project AE metering."); + } + + await capturePosthogEvent(input.env, input.userId, "ask_project_request", { + project_id: input.projectId, + org_id: input.orgId, + tier: input.tier, + source: input.source, + status: input.status, + status_bucket: getStatusBucket(input.status), + latency_ms: Math.max(0, Math.round(input.latencyMs)), + evidence_count: Math.max(0, Math.round(input.evidenceCount)), + question_length: Math.max(0, Math.round(input.questionLength)), + has_endpoint_hint: input.hasEndpointHint, + hint_method: input.hintMethod ?? null, + error_code: input.errorCode ?? null, + }); +} + +function scheduleLatestCodeIndex( + env: Bindings, + ctx: ExecutionContext, + projectId: string, + deployment: Deployment, + reason: AskIndexQueueMessage["reason"] = "deploy", +): void { + if (deployment.status !== "live") return; + ctx.waitUntil( + (async () => { + if (env.ASK_INDEX_QUEUE) { + await enqueueAskIndexJob(env, { + version: 1, + projectId, + deploymentId: deployment.id, + enqueuedAt: new Date().toISOString(), + reason, + }); + return; + } + + // Fallback path when queue is not bound (dev/smoke environments). 
+ await indexLatestDeploymentSource(env, { + projectId, + deployment, + queueAttempts: 1, + }); + })().catch((error) => { + console.error("ask_project indexer failed:", error); + }), + ); +} + function validateUsername(username: string): string | null { if (!username || username.trim() === "") { return "Username cannot be empty"; @@ -2743,6 +2917,7 @@ api.post("/projects/:projectId/rollback", async (c) => { try { const deploymentService = new DeploymentService(c.env); const deployment = await deploymentService.rollbackDeployment(projectId, deploymentId); + scheduleLatestCodeIndex(c.env, c.executionCtx, projectId, deployment, "rollback"); return c.json({ deployment: { id: deployment.id, @@ -2766,6 +2941,111 @@ api.post("/projects/:projectId/rollback", async (c) => { } }); +// POST /v1/projects/:projectId/ask - Ask an evidence-backed debugging question +api.post("/projects/:projectId/ask", async (c) => { + const auth = c.get("auth"); + const projectId = c.req.param("projectId"); + const provisioning = new ProvisioningService(c.env); + + const project = await provisioning.getProject(projectId); + if (!project) { + return c.json({ error: "not_found", message: "Project not found" }, 404); + } + + const membership = await c.env.DB.prepare( + "SELECT 1 FROM org_memberships WHERE org_id = ? 
AND user_id = ?", + ) + .bind(project.org_id, auth.userId) + .first(); + if (!membership) { + return c.json({ error: "not_found", message: "Project not found" }, 404); + } + + const tier = await resolveOrgPlanTier(c.env.DB, project.org_id); + const source = inferAskSource(c); + const requestStartedAt = Date.now(); + + const recordAskOutcome = (params: { + status: number; + errorCode?: string; + evidenceCount?: number; + questionLength?: number; + hasEndpointHint?: boolean; + hintMethod?: string; + }) => { + c.executionCtx.waitUntil( + trackAskProjectRequest({ + env: c.env, + userId: auth.userId, + projectId: project.id, + orgId: project.org_id, + tier, + source, + status: params.status, + latencyMs: Date.now() - requestStartedAt, + evidenceCount: params.evidenceCount ?? 0, + questionLength: params.questionLength ?? 0, + hasEndpointHint: params.hasEndpointHint ?? false, + hintMethod: params.hintMethod, + errorCode: params.errorCode, + }), + ); + }; + + let body: AskProjectRequest; + try { + body = await c.req.json(); + } catch { + recordAskOutcome({ status: 400, errorCode: "invalid_json" }); + return c.json({ error: "invalid_request", message: "Invalid JSON body" }, 400); + } + + const question = body.question?.trim() ?? 
""; + const hasEndpointHint = Boolean(body.hints?.endpoint); + const hintMethod = body.hints?.method; + + if (!question) { + recordAskOutcome({ + status: 400, + errorCode: "invalid_question", + questionLength: 0, + hasEndpointHint, + hintMethod, + }); + return c.json({ error: "invalid_request", message: "question is required" }, 400); + } + + try { + const response = await answerProjectQuestion({ + env: c.env, + project: { + id: project.id, + slug: project.slug, + owner_username: project.owner_username, + }, + question, + hints: body.hints, + }); + recordAskOutcome({ + status: 200, + evidenceCount: response.evidence.length, + questionLength: question.length, + hasEndpointHint, + hintMethod, + }); + return c.json(response); + } catch (error) { + const message = error instanceof Error ? error.message : "Failed to answer question"; + recordAskOutcome({ + status: 500, + errorCode: "internal_error", + questionLength: question.length, + hasEndpointHint, + hintMethod, + }); + return c.json({ error: "internal_error", message }, 500); + } +}); // GET /v1/projects/:projectId/crons - List cron schedules api.get("/projects/:projectId/crons", async (c) => { @@ -3186,9 +3466,7 @@ api.delete("/projects/:projectId", async (c) => { } // Clean up DO enforcement state (if any) - await c.env.DB.prepare("DELETE FROM do_enforcement WHERE project_id = ?") - .bind(projectId) - .run(); + await c.env.DB.prepare("DELETE FROM do_enforcement WHERE project_id = ?").bind(projectId).run(); // Soft-delete in DB const now = new Date().toISOString(); @@ -3270,6 +3548,7 @@ api.post("/projects/:projectId/deployments", async (c) => { try { const deploymentService = new DeploymentService(c.env); const deployment = await deploymentService.createDeployment(projectId, body.source); + scheduleLatestCodeIndex(c.env, c.executionCtx, projectId, deployment); return c.json(deployment, 201); } catch (error) { const message = error instanceof Error ? 
error.message : "Deployment creation failed"; @@ -3421,6 +3700,7 @@ api.post("/projects/:projectId/deployments/upload", async (c) => { assetManifest, message: deployMessage ?? undefined, }); + scheduleLatestCodeIndex(c.env, c.executionCtx, projectId, deployment); return c.json(deployment, 201); } catch (error) { @@ -5848,6 +6128,9 @@ const handler: ExportedHandler = { ctx.waitUntil(processDueCronSchedules(env, ctx)); ctx.waitUntil(processDoMetering(env)); }, + queue: async (batch, env) => { + await consumeAskIndexBatch(batch, env); + }, }; export default handler; diff --git a/apps/control-plane/src/types.ts b/apps/control-plane/src/types.ts index 993f1d1..9631ca1 100644 --- a/apps/control-plane/src/types.ts +++ b/apps/control-plane/src/types.ts @@ -7,8 +7,10 @@ export type Bindings = { CLOUDFLARE_ZONE_ID: string; PROJECTS_CACHE: KVNamespace; CODE_BUCKET: R2Bucket; + ASK_INDEX_QUEUE?: Queue; TENANT_DISPATCH: DispatchNamespace; USAGE: AnalyticsEngineDataset; + CONTROL_USAGE?: AnalyticsEngineDataset; LOG_STREAM: DurableObjectNamespace; FEEDBACK_LIMITER: { limit: (options: { key: string }) => Promise<{ success: boolean }>; @@ -25,6 +27,9 @@ export type Bindings = { DAIMO_RECEIVER_ADDRESS: string; // Secrets encryption (RSA-OAEP private key JWK, set via wrangler secret put) SECRETS_ENCRYPTION_PRIVATE_KEY: string; + // Optional PostHog server-side capture key for control-plane events + POSTHOG_API_KEY?: string; + POSTHOG_HOST?: string; }; // Project status enum diff --git a/apps/control-plane/wrangler.toml b/apps/control-plane/wrangler.toml index 9c77299..960de56 100644 --- a/apps/control-plane/wrangler.toml +++ b/apps/control-plane/wrangler.toml @@ -22,6 +22,16 @@ id = "1745e7c2b059484c81d39c694201d061" binding = "CODE_BUCKET" bucket_name = "jack-code-internal" +[[queues.producers]] +binding = "ASK_INDEX_QUEUE" +queue = "ask-index-jobs" + +[[queues.consumers]] +queue = "ask-index-jobs" +max_batch_size = 10 +max_batch_timeout = 5 +max_retries = 4 + 
[[dispatch_namespaces]] binding = "TENANT_DISPATCH" namespace = "jack-tenants" @@ -53,6 +63,11 @@ simple = { limit = 30, period = 60 } binding = "USAGE" dataset = "jack_usage" +# Analytics Engine dataset for control-plane feature usage +[[analytics_engine_datasets]] +binding = "CONTROL_USAGE" +dataset = "jack_control_usage" + # Note: Run `wrangler d1 create jack-control-db` first # Then add the database_id here # Secrets (set via wrangler secret put <NAME>): @@ -65,3 +80,4 @@ dataset = "jack_usage" # - DAIMO_API_KEY # Daimo API key (from Daimo team) # - DAIMO_WEBHOOK_SECRET # Daimo webhook basic auth token # - DAIMO_RECEIVER_ADDRESS # Wallet address to receive USDC on Base +# - POSTHOG_API_KEY # Optional: server-side product analytics capture diff --git a/apps/mcp-worker/src/control-plane.ts b/apps/mcp-worker/src/control-plane.ts index b112b71..2be08e0 100644 --- a/apps/mcp-worker/src/control-plane.ts +++ b/apps/mcp-worker/src/control-plane.ts @@ -26,6 +26,27 @@ export interface LogSessionInfo { status: string; } +export interface AskProjectHintInput { + endpoint?: string; + method?: "GET" | "POST" | "PUT" | "PATCH" | "DELETE"; + deployment_id?: string; +} + +export interface AskProjectEvidence { + id: string; + type: string; + source: string; + summary: string; + timestamp: string; + relation: "supports" | "conflicts" | "gap"; + meta?: Record<string, unknown>; +} + +export interface AskProjectResult { + answer: string; + evidence: AskProjectEvidence[]; +} + export class ControlPlaneClient { private baseUrl: string; private token: string; @@ -77,9 +98,7 @@ export class ControlPlaneClient { } } - async getProjectResources( - projectId: string, - ): Promise<{ + async getProjectResources(projectId: string): Promise<{ resources: Array<{ id: string; resource_type: string; @@ -303,10 +322,13 @@ export class ControlPlaneClient { }); } - async listDatabases( - projectId: string, - ): Promise<{ - resources: Array<{ id: string; resource_type: string; resource_name: string; binding_name: string | 
null }>; + async listDatabases(projectId: string): Promise<{ + resources: Array<{ + id: string; + resource_type: string; + resource_name: string; + binding_name: string | null; + }>; + }> { const { resources } = await this.getProjectResources(projectId); return { resources: resources.filter((r) => r.resource_type === "d1") }; @@ -330,4 +352,22 @@ export class ControlPlaneClient { body: JSON.stringify(deploymentId ? { deployment_id: deploymentId } : {}), }); } + + async askProject( + projectId: string, + question: string, + hints?: AskProjectHintInput, + ): Promise<AskProjectResult> { + return this.jsonFetch(`/projects/${encodeURIComponent(projectId)}/ask`, { + method: "POST", + headers: { + "Content-Type": "application/json", + "X-Jack-Source": "mcp_remote", + }, + body: JSON.stringify({ + question, + hints, + }), + }); + } } diff --git a/apps/mcp-worker/src/server.ts b/apps/mcp-worker/src/server.ts index 6dc98bf..68965fd 100644 --- a/apps/mcp-worker/src/server.ts +++ b/apps/mcp-worker/src/server.ts @@ -1,6 +1,7 @@ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { z } from "zod"; import { ControlPlaneClient } from "./control-plane.ts"; +import { askProject } from "./tools/ask-project.ts"; import { createDatabase, executeSql, listDatabases } from "./tools/database.ts"; import { deploy } from "./tools/deploy-code.ts"; import { getLogs } from "./tools/logs.ts"; @@ -60,6 +61,31 @@ IMPORTANT: To update an existing project, ALWAYS use changes mode with project_i }, ); + server.tool( + "ask_project", + "Ask an evidence-backed debugging question about a deployed project. Best for runtime failures, recent changes, and code-to-production impact analysis.", + { + project_id: z.string().describe("The project ID"), + question: z.string().describe("Debugging question to ask about this project"), + hints: z + .object({ + endpoint: z.string().optional().describe("Endpoint path hint, e.g. 
/api/todos"), + method: z + .enum(["GET", "POST", "PUT", "PATCH", "DELETE"]) + .optional() + .describe("HTTP method hint for endpoint checks"), + deployment_id: z + .string() + .optional() + .describe("Optional deployment ID to focus historical reasoning"), + }) + .optional(), + }, + async ({ project_id, question, hints }) => { + return askProject(client, project_id, question, hints); + }, + ); + server.tool( "list_projects", "List all projects deployed to Jack Cloud for the authenticated user. Call this FIRST when the user refers to an existing app or project to find its project_id before using other tools.", diff --git a/apps/mcp-worker/src/tools/ask-project.ts b/apps/mcp-worker/src/tools/ask-project.ts new file mode 100644 index 0000000..047aaf3 --- /dev/null +++ b/apps/mcp-worker/src/tools/ask-project.ts @@ -0,0 +1,23 @@ +import type { AskProjectHintInput, ControlPlaneClient } from "../control-plane.ts"; +import { type McpToolResult, err, ok } from "../utils.ts"; + +export async function askProject( + client: ControlPlaneClient, + projectId: string, + question: string, + hints?: AskProjectHintInput, +): Promise<McpToolResult> { + try { + const result = await client.askProject(projectId, question, hints); + return ok(result); + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + if (message.includes("not found") || message.includes("Not found")) { + return err("NOT_FOUND", message, "Use list_projects to confirm project_id."); + } + if (message.includes("question is required")) { + return err("VALIDATION_ERROR", message, "Provide a non-empty question."); + } + return err("INTERNAL_ERROR", `ask_project failed: ${message}`); + } +} diff --git a/docs/pages/guides/ai-agents.mdx b/docs/pages/guides/ai-agents.mdx index 9c1dde6..166eb38 100644 --- a/docs/pages/guides/ai-agents.mdx +++ b/docs/pages/guides/ai-agents.mdx @@ -14,7 +14,7 @@ Install jack and configure MCP in one command: curl -fsSL docs.getjack.org/install.sh | bash ``` -Start a new Claude Code session (or `/mcp reconnect jack`), then ask Claude to deploy something. Auth happens automatically on first use. +Start a new Claude Code session (or `/mcp reconnect jack`), then ask Claude to deploy something. **No terminal?** Use the remote MCP server from Claude Desktop or Claude.ai: @@ -24,6 +24,54 @@ https://mcp.getjack.org/mcp The remote server has 10 tools (deploy, projects, databases, logs, files). Install locally for the full 27-tool experience. +## Authentication + +Agents need to authenticate before deploying. There are three ways, depending on your setup. + +### Pre-existing token (simplest) + +If you already have an API token, set it as an environment variable. No login needed. + +```bash +export JACK_API_TOKEN=jkt_abc123... +``` + +Create tokens with `jack tokens create`. + +### Magic auth (headless, no browser) + +For agents that can send/receive email, or for human-supervised setups where a human can read the code from email. + +**Two-step flow (fully non-interactive):** + +```bash +jack login --email agent@example.com +``` + +This sends a 6-digit verification code to the email and exits. Then: + +```bash +jack login --email agent@example.com --code 847291 +``` + +This verifies the code, registers the account, and creates a `jkt_*` API token. 
The CLI is authenticated for all future commands. The token is also printed to stdout for programmatic capture. + +**Interactive flow (human-supervised):** + +If running in a terminal, `jack login --email you@example.com` will prompt for the code inline — the human reads it from email and pastes it. + +**Environment variables:** + +| Variable | Purpose | +|----------|---------| +| `JACK_EMAIL` | Same as `--email` flag — triggers magic auth automatically | +| `JACK_API_TOKEN` | Skip login entirely, use an existing token | +| `CI=true` | Forces non-interactive mode (no prompts) | + +### Browser login (interactive humans) + +Running `jack login` without `--email` opens a browser for OAuth login. This is the default for interactive terminal sessions. If no browser is available, it fails fast with instructions to use `--email` instead. + ## MCP Server jack includes a built-in [Model Context Protocol](https://modelcontextprotocol.io) server that lets AI agents deploy and manage projects directly. diff --git a/docs/pages/guides/openclaw.mdx b/docs/pages/guides/openclaw.mdx index 677fa40..e26f96b 100644 --- a/docs/pages/guides/openclaw.mdx +++ b/docs/pages/guides/openclaw.mdx @@ -39,7 +39,27 @@ curl -fsSL docs.getjack.org/install.sh | bash Or try without installing: `npx -y @getjack/jack new my-api --template api` -Auth happens automatically on first use. +### Authentication + +**Already have a token?** Set it and go: + +```bash +export JACK_API_TOKEN=jkt_abc123... +``` + +**No token yet?** Use magic auth (no browser needed): + +```bash +jack login --email you@example.com +``` + +Check your email for a 6-digit code, then: + +```bash +jack login --email you@example.com --code 847291 +``` + +The CLI is now authenticated and a `jkt_*` API token is created automatically. See the [AI agents guide](/guides/ai-agents#authentication) for the full breakdown. 
## CLI or MCP — Both Work diff --git a/docs/pages/guides/troubleshooting.mdx b/docs/pages/guides/troubleshooting.mdx index 2820d5b..84323f0 100644 --- a/docs/pages/guides/troubleshooting.mdx +++ b/docs/pages/guides/troubleshooting.mdx @@ -17,18 +17,37 @@ jack ship ### "Authentication required" -Your Cloudflare session expired or was never set up. +Your session expired or was never set up. **Fix:** Run the login flow: ```bash -# For managed mode (jack cloud) +# Interactive (opens browser) jack login +# Headless / agent (email verification, no browser) +jack login --email you@example.com +# then: jack login --email you@example.com --code <6-digit-code> + +# Or set an API token directly +export JACK_API_TOKEN=jkt_abc123... + # For BYO mode (your Cloudflare account) wrangler login ``` +### "Cannot open browser in this environment" + +You ran `jack login` in a non-interactive environment (CI, Docker, SSH, agent) without specifying an email. + +**Fix:** Use the headless login flow: + +```bash +jack login --email you@example.com +``` + +Or set `JACK_API_TOKEN` if you already have a token. + ### "Worker not found" or "404 after deploy" The worker exists but isn't responding at the expected URL. diff --git a/scripts/benchmark-ask-project.sh b/scripts/benchmark-ask-project.sh new file mode 100755 index 0000000..9a4247c --- /dev/null +++ b/scripts/benchmark-ask-project.sh @@ -0,0 +1,159 @@ +#!/usr/bin/env bash +set -euo pipefail + +if ! command -v curl >/dev/null 2>&1; then + echo "error: curl is required" >&2 + exit 1 +fi + +if ! 
command -v jq >/dev/null 2>&1; then + echo "error: jq is required" >&2 + exit 1 +fi + +CONTROL_BASE="${CONTROL_BASE:-https://control.getjack.org}" +JACK_API_TOKEN="${JACK_API_TOKEN:-}" +PROJECT_ID="${PROJECT_ID:-}" +PROJECT_SLUG="${PROJECT_SLUG:-}" +BENCHMARK_FILE="${BENCHMARK_FILE:-scripts/fixtures/ask-project-benchmark.json}" +MIN_USEFUL_RATE="${MIN_USEFUL_RATE:-75}" + +if [[ -z "$JACK_API_TOKEN" ]]; then + echo "error: set JACK_API_TOKEN" >&2 + exit 1 +fi + +if [[ ! -f "$BENCHMARK_FILE" ]]; then + echo "error: benchmark file not found: $BENCHMARK_FILE" >&2 + exit 1 +fi + +auth_header=("Authorization: Bearer $JACK_API_TOKEN") +json_header=("Content-Type: application/json") + +resolve_project_id() { + if [[ -n "$PROJECT_ID" ]]; then + echo "$PROJECT_ID" + return 0 + fi + + if [[ -n "$PROJECT_SLUG" ]]; then + local body + body="$(curl -sS \ + -H "${auth_header[0]}" \ + "$CONTROL_BASE/v1/projects/by-slug/$PROJECT_SLUG")" + echo "$body" | jq -r '.project.id // empty' + return 0 + fi + + local body + body="$(curl -sS -H "${auth_header[0]}" "$CONTROL_BASE/v1/projects")" + echo "$body" | jq -r '.projects[0].id // empty' +} + +PID="$(resolve_project_id)" +if [[ -z "$PID" ]]; then + echo "error: could not resolve PROJECT_ID (set PROJECT_ID or PROJECT_SLUG)" >&2 + exit 1 +fi + +echo "Using project: $PID" +echo "Benchmark file: $BENCHMARK_FILE" + +total="$(jq 'length' "$BENCHMARK_FILE")" +if [[ "$total" -eq 0 ]]; then + echo "error: benchmark file is empty" >&2 + exit 1 +fi + +pass=0 +fail=0 +unknown=0 +idx=0 + +while IFS= read -r row; do + idx=$((idx + 1)) + name="$(echo "$row" | jq -r '.name')" + question="$(echo "$row" | jq -r '.question')" + min_evidence="$(echo "$row" | jq -r '.min_evidence // 1')" + expect_contains="$(echo "$row" | jq -r '.expect_answer_contains // ""')" + hints="$(echo "$row" | jq -c '.hints // {}')" + + payload="$(jq -n \ + --arg q "$question" \ + --argjson h "$hints" \ + '{question:$q,hints:$h}')" + + tmp_body="$(mktemp)" + http_code="$( + curl -sS 
\ + -o "$tmp_body" \ + -w "%{http_code}" \ + -H "${auth_header[0]}" \ + -H "${json_header[0]}" \ + -X POST "$CONTROL_BASE/v1/projects/$PID/ask" \ + -d "$payload" + )" + + if [[ "$http_code" == "404" ]]; then + echo "[$idx/$total] $name -> SKIP (endpoint missing)" + rm -f "$tmp_body" + unknown=$((unknown + 1)) + continue + fi + + if [[ "$http_code" -lt 200 || "$http_code" -ge 300 ]]; then + echo "[$idx/$total] $name -> FAIL (HTTP $http_code)" + fail=$((fail + 1)) + rm -f "$tmp_body" + continue + fi + + ok_shape=0 + if jq -e ' + (.answer | type == "string" and length > 0) and + (.evidence | type == "array") + ' "$tmp_body" >/dev/null; then + ok_shape=1 + fi + + evidence_count="$(jq '.evidence | length' "$tmp_body")" + ok_evidence=0 + if [[ "$evidence_count" -ge "$min_evidence" ]]; then + ok_evidence=1 + fi + + ok_text=1 + if [[ -n "$expect_contains" ]]; then + if ! jq -e --arg s "$expect_contains" '.answer | ascii_downcase | contains($s | ascii_downcase)' "$tmp_body" >/dev/null; then + ok_text=0 + fi + fi + + if [[ "$ok_shape" -eq 1 && "$ok_evidence" -eq 1 && "$ok_text" -eq 1 ]]; then + echo "[$idx/$total] $name -> PASS" + pass=$((pass + 1)) + else + echo "[$idx/$total] $name -> FAIL (shape=$ok_shape evidence=$evidence_count text=$ok_text)" + fail=$((fail + 1)) + fi + + rm -f "$tmp_body" +done < <(jq -c '.[]' "$BENCHMARK_FILE") + +echo +echo "Summary: pass=$pass fail=$fail unknown=$unknown total=$total" +useful_rate="$(awk -v p="$pass" -v t="$total" 'BEGIN { if (t==0) print 0; else printf "%.2f", (p*100.0)/t }')" +echo "Useful rate (raw pass/total): ${useful_rate}%" + +if [[ "$unknown" -eq "$total" ]]; then + echo "No benchmark cases executed because endpoint is unavailable." 
+ exit 2 +fi + +if awk -v r="$useful_rate" -v m="$MIN_USEFUL_RATE" 'BEGIN { exit !(r < m) }'; then + echo "FAIL: useful rate ${useful_rate}% is below MIN_USEFUL_RATE=${MIN_USEFUL_RATE}%" + exit 1 +fi + +echo "PASS: useful rate ${useful_rate}% meets MIN_USEFUL_RATE=${MIN_USEFUL_RATE}%" diff --git a/scripts/fixtures/ask-project-benchmark.json b/scripts/fixtures/ask-project-benchmark.json new file mode 100644 index 0000000..97ec2f5 --- /dev/null +++ b/scripts/fixtures/ask-project-benchmark.json @@ -0,0 +1,35 @@ +[ + { + "name": "Runtime 500 diagnosis", + "question": "Why is /api/todos returning 500 right now?", + "hints": { "endpoint": "/api/todos", "method": "GET" }, + "min_evidence": 1 + }, + { + "name": "Recent change impact", + "question": "What changed recently that likely caused this issue?", + "min_evidence": 2 + }, + { + "name": "Deployment rationale", + "question": "Why did we ship the latest deployment?", + "min_evidence": 1 + }, + { + "name": "Code to prod mapping", + "question": "Which code areas are most likely tied to this endpoint behavior?", + "hints": { "endpoint": "/api/todos", "method": "GET" }, + "min_evidence": 1 + }, + { + "name": "Config mismatch probe", + "question": "Do runtime bindings and schema look mismatched with this endpoint?", + "hints": { "endpoint": "/api/todos", "method": "GET" }, + "min_evidence": 1 + }, + { + "name": "Next best check", + "question": "What should I check next to confirm root cause?", + "min_evidence": 1 + } +] diff --git a/scripts/test-ask-project-e2e.sh b/scripts/test-ask-project-e2e.sh new file mode 100755 index 0000000..28af5b5 --- /dev/null +++ b/scripts/test-ask-project-e2e.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash +set -euo pipefail + +if ! command -v curl >/dev/null 2>&1; then + echo "error: curl is required" >&2 + exit 1 +fi + +if ! 
command -v jq >/dev/null 2>&1; then + echo "error: jq is required" >&2 + exit 1 +fi + +CONTROL_BASE="${CONTROL_BASE:-https://control.getjack.org}" +JACK_API_TOKEN="${JACK_API_TOKEN:-}" +PROJECT_ID="${PROJECT_ID:-}" +PROJECT_SLUG="${PROJECT_SLUG:-}" +QUESTION="${QUESTION:-Why is /api/todos returning 500?}" + +if [[ -z "$JACK_API_TOKEN" ]]; then + echo "error: set JACK_API_TOKEN" >&2 + exit 1 +fi + +auth_header=("Authorization: Bearer $JACK_API_TOKEN") +json_header=("Content-Type: application/json") + +resolve_project_id() { + if [[ -n "$PROJECT_ID" ]]; then + echo "$PROJECT_ID" + return 0 + fi + + if [[ -n "$PROJECT_SLUG" ]]; then + local body + body="$(curl -sS \ + -H "${auth_header[0]}" \ + "$CONTROL_BASE/v1/projects/by-slug/$PROJECT_SLUG")" + echo "$body" | jq -r '.project.id // empty' + return 0 + fi + + local body + body="$(curl -sS -H "${auth_header[0]}" "$CONTROL_BASE/v1/projects")" + echo "$body" | jq -r '.projects[0].id // empty' +} + +PID="$(resolve_project_id)" +if [[ -z "$PID" ]]; then + echo "error: could not resolve PROJECT_ID (set PROJECT_ID or PROJECT_SLUG)" >&2 + exit 1 +fi + +echo "Using project: $PID" + +payload="$(jq -n --arg q "$QUESTION" '{ + question: $q, + hints: { endpoint: "/api/todos", method: "GET" } +}')" + +tmp_body="$(mktemp)" +http_code="$( + curl -sS \ + -o "$tmp_body" \ + -w "%{http_code}" \ + -H "${auth_header[0]}" \ + -H "${json_header[0]}" \ + -X POST "$CONTROL_BASE/v1/projects/$PID/ask" \ + -d "$payload" +)" + +if [[ "$http_code" == "404" ]]; then + echo "ask endpoint not found (not implemented/deployed on target env yet)." >&2 + cat "$tmp_body" >&2 || true + rm -f "$tmp_body" + exit 2 +fi + +if [[ "$http_code" -lt 200 || "$http_code" -ge 300 ]]; then + echo "error: ask endpoint returned HTTP $http_code" >&2 + cat "$tmp_body" >&2 || true + rm -f "$tmp_body" + exit 1 +fi + +jq . 
"$tmp_body" + +jq -e ' + (.answer | type == "string" and length > 0) and + (.evidence | type == "array" and length > 0) +' "$tmp_body" >/dev/null + +jq -e ' + [.evidence[] | + (has("id") and has("type") and has("source") and has("summary") and has("timestamp") and has("relation")) + ] | all +' "$tmp_body" >/dev/null + +jq -e ' + [.evidence[] | (.relation == "supports" or .relation == "conflicts" or .relation == "gap")] | all +' "$tmp_body" >/dev/null + +echo "PASS: ask_project response contract is valid" +rm -f "$tmp_body"