diff --git a/CLAUDE.md b/CLAUDE.md index cb293c0..a8ed64e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -132,6 +132,86 @@ STAGING_CLOUDFLARE_PAGES_PROJECT_NAME # Staging Pages project name - **Security audit logging**: All auth events logged to `security_audit_log` table - **Rate limiting**: Cloudflare Workers rate limit API per plan tier +## AI Features Configuration + +TuvixRSS includes optional AI-powered features using OpenAI and the Vercel AI SDK. + +### Features + +- **AI Category Suggestions**: Automatically suggests feed categories based on feed metadata and recent articles +- **Model**: GPT-4o-mini (via `@ai-sdk/openai`) +- **Location**: `packages/api/src/services/ai-category-suggester.ts` + +### Feature Access Control + +AI features are **triple-gated** for security and cost control: + +1. **Global Setting**: `aiEnabled` flag in `global_settings` table (admin-controlled via admin dashboard) +2. **User Plan**: Only Pro or Enterprise plan users have access +3. **Environment**: `OPENAI_API_KEY` must be configured + +Access check: `packages/api/src/services/limits.ts:checkAiFeatureAccess()` + +### Configuration + +**Local Development (Docker/Node.js):** + +```env +# Add to .env +OPENAI_API_KEY=sk-proj-xxxxxxxxxxxxx +``` + +**Cloudflare Workers (Production/Staging):** + +```bash +# Use wrangler CLI to set secret +cd packages/api +npx wrangler secret put OPENAI_API_KEY +# Enter: sk-proj-xxxxxxxxxxxxx +``` + +**GitHub Actions (CI/CD):** +Add `OPENAI_API_KEY` to repository secrets for production deployments. 
+ +### Sentry Instrumentation + +AI calls are automatically tracked by Sentry via the `vercelAIIntegration`: + +- **Token usage**: Tracked automatically by AI SDK telemetry +- **Latency**: Per-call duration metrics +- **Model info**: Model name and version +- **Errors**: AI SDK errors and failures +- **Input/Output**: Captured when `experimental_telemetry.recordInputs/recordOutputs` is enabled + +**Configuration:** + +- Node.js: `packages/api/src/entries/node.ts` (Sentry.init with vercelAIIntegration) +- Cloudflare: `packages/api/src/entries/cloudflare.ts` (withSentry config) +- AI calls: Include `experimental_telemetry` with `functionId` for better tracking + +**Example:** + +```typescript +const result = await generateObject({ + model: openai("gpt-4o-mini"), + // ... schema and prompts + experimental_telemetry: { + isEnabled: true, + functionId: "ai.suggestCategories", + recordInputs: true, + recordOutputs: true, + }, +}); +``` + +### Best Practices + +1. **Always check access**: Use `checkAiFeatureAccess()` before calling AI services +2. **Graceful degradation**: Return `undefined` if AI is unavailable (don't error) +3. **Add telemetry**: Include `experimental_telemetry` in all AI SDK calls +4. **Function IDs**: Use descriptive `functionId` for easier tracking in Sentry +5. **Cost awareness**: AI features are gated to Pro/Enterprise to manage costs + ## Observability with Sentry TuvixRSS uses Sentry for comprehensive observability: error tracking, performance monitoring, and custom metrics. 
diff --git a/docs/deployment.md b/docs/deployment.md index 1babd7b..09ad18c 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -295,6 +295,11 @@ ADMIN_PASSWORD= # RESEND_API_KEY=re_xxxxxxxxx # EMAIL_FROM=noreply@yourdomain.com +# Optional: AI Features (requires Pro or Enterprise plan) +# OpenAI API key for AI-powered category suggestions +# Get your API key from: https://platform.openai.com/api-keys +# OPENAI_API_KEY=sk-proj-xxxxxxxxxxxxx + # Optional: Customize fetch behavior FETCH_INTERVAL_MINUTES=60 # How often to fetch RSS feeds ``` @@ -662,6 +667,12 @@ npx wrangler secret put RESEND_API_KEY npx wrangler secret put EMAIL_FROM npx wrangler secret put BASE_URL +# AI Features (requires Pro or Enterprise plan) +# OpenAI API key for AI-powered category suggestions +# Get your API key from: https://platform.openai.com/api-keys +npx wrangler secret put OPENAI_API_KEY +# Enter: sk-proj-xxxxxxxxxxxxx + # Cross-subdomain cookies (if frontend/API on different subdomains) npx wrangler secret put COOKIE_DOMAIN # Enter: example.com (root domain, not subdomain like api.example.com) diff --git a/env.example b/env.example index 156f0cf..ade79b3 100644 --- a/env.example +++ b/env.example @@ -75,6 +75,12 @@ ADMIN_PASSWORD=change-me-in-production # is more deterministic and avoids any timing concerns. 
# ALLOW_FIRST_USER_ADMIN=true +# AI Features (optional - requires Pro or Enterprise plan) +# OpenAI API key for AI-powered category suggestions +# Get your API key from: https://platform.openai.com/api-keys +# Leave unset to disable AI features +# OPENAI_API_KEY=sk-proj-xxxxxxxxxxxxx + # Sentry Configuration (optional) # Backend Sentry DSN (for Express/Cloudflare Workers) # SENTRY_DSN=https://xxx@xxx.ingest.sentry.io/xxx diff --git a/packages/api/src/config/sentry.ts b/packages/api/src/config/sentry.ts index cc80871..7cc9720 100644 --- a/packages/api/src/config/sentry.ts +++ b/packages/api/src/config/sentry.ts @@ -76,6 +76,12 @@ export function getSentryConfig(env: Env): Record<string, unknown> | null { // Debug mode (verbose logging - useful for development) debug: environment === "development", + // Vercel AI SDK integration for automatic AI span tracking + // Captures token usage, model info, latency, and errors from AI SDK calls + // Note: Integration setup is handled differently for Cloudflare Workers vs Node.js + // For Cloudflare, integrations are configured in the entry point via withSentry + enableAIIntegration: true, + /** * beforeSendMetric callback * * diff --git a/packages/api/src/entries/cloudflare.ts b/packages/api/src/entries/cloudflare.ts index 12038e5..5646ef0 100644 --- a/packages/api/src/entries/cloudflare.ts +++ b/packages/api/src/entries/cloudflare.ts @@ -116,6 +116,15 @@ export default Sentry.withSentry((env: Env) => { config.release = versionId; } + // Add Vercel AI SDK integration for automatic AI span tracking + // Captures token usage, model info, latency, and errors from AI SDK calls + // Note: Input/output recording is controlled via experimental_telemetry in AI SDK calls + // Type assertion needed since getSentryConfig returns Record<string, unknown> + const existingIntegrations = Array.isArray(config.integrations) + ? 
(config.integrations as unknown[]) + : []; + config.integrations = [...existingIntegrations, Sentry.vercelAIIntegration()]; + // Log Sentry initialization in development const environment = (env.SENTRY_ENVIRONMENT || env.NODE_ENV || @@ -125,6 +134,7 @@ export default Sentry.withSentry((env: Env) => { environment, release: config.release, hasDsn: !!config.dsn, + aiTracking: true, }); } diff --git a/packages/api/src/entries/node.ts b/packages/api/src/entries/node.ts index f4d0dab..ab672bf 100644 --- a/packages/api/src/entries/node.ts +++ b/packages/api/src/entries/node.ts @@ -62,9 +62,15 @@ if (env.SENTRY_DSN) { ignoreIncomingRequestBody: (url) => url.includes("/trpc"), }), Sentry.nativeNodeFetchIntegration(), + // Vercel AI SDK integration for automatic AI span tracking + // Captures token usage, model info, latency, and errors from AI SDK calls + Sentry.vercelAIIntegration({ + recordInputs: true, // Safe: only used for pro/enterprise users with opt-in + recordOutputs: true, // Captures structured category suggestions + }), ], }); - console.log("✅ Sentry initialized (with metrics enabled)"); + console.log("✅ Sentry initialized (with metrics and AI tracking enabled)"); } } diff --git a/packages/api/src/services/ai-category-suggester.ts b/packages/api/src/services/ai-category-suggester.ts index b4de512..db9e203 100644 --- a/packages/api/src/services/ai-category-suggester.ts +++ b/packages/api/src/services/ai-category-suggester.ts @@ -117,6 +117,14 @@ INSTRUCTIONS: prompt: "Based on the provided context, suggest relevant categories for this RSS feed.", system: systemPrompt, + // Enable Sentry AI SDK telemetry for automatic span tracking + // Captures token usage, model info, latency, and errors + experimental_telemetry: { + isEnabled: true, + functionId: "ai.suggestCategories", + recordInputs: true, // Safe: only captures feed metadata, not user PII + recordOutputs: true, // Captures structured category suggestions + }, }); // Filter by confidence threshold (85%) diff 
--git a/packages/app/src/__tests__/routes/app-admin-route-offline.test.tsx b/packages/app/src/__tests__/routes/app-admin-route-offline.test.tsx index e200e17..26feed7 100644 --- a/packages/app/src/__tests__/routes/app-admin-route-offline.test.tsx +++ b/packages/app/src/__tests__/routes/app-admin-route-offline.test.tsx @@ -43,7 +43,9 @@ const routeModule = await import("../../routes/app/admin/route"); describe("Admin Route - Offline Navigation", () => { beforeEach(() => { vi.clearAllMocks(); - localStorage.clear(); + if (typeof localStorage.clear === "function") { + localStorage.clear(); + } }); describe("network error handling", () => { diff --git a/packages/app/src/__tests__/routes/app-route-offline.test.tsx b/packages/app/src/__tests__/routes/app-route-offline.test.tsx index a456648..a0e55b5 100644 --- a/packages/app/src/__tests__/routes/app-route-offline.test.tsx +++ b/packages/app/src/__tests__/routes/app-route-offline.test.tsx @@ -107,7 +107,9 @@ const routeModule = await import("../../routes/app/route"); describe("App Route - Offline Navigation", () => { beforeEach(() => { vi.clearAllMocks(); - localStorage.clear(); + if (typeof localStorage.clear === "function") { + localStorage.clear(); + } // Reset mocks mockCheckVerificationStatus.mockResolvedValue({ diff --git a/packages/app/src/components/settings/__tests__/pwa-install-card.integration.test.tsx b/packages/app/src/components/settings/__tests__/pwa-install-card.integration.test.tsx index 7e10901..0bf0a63 100644 --- a/packages/app/src/components/settings/__tests__/pwa-install-card.integration.test.tsx +++ b/packages/app/src/components/settings/__tests__/pwa-install-card.integration.test.tsx @@ -156,7 +156,11 @@ describe("PWAInstallCard Integration Tests", () => { }); // Verify success toast was shown - expect(toast.success).toHaveBeenCalledWith("App installed successfully!"); + await waitFor(() => { + expect(toast.success).toHaveBeenCalledWith( + "App installed successfully!" 
+ ); + }); // Verify installed state UI expect(