From 0905fbde84a7c6ffb1447954f9e19e14191e4de8 Mon Sep 17 00:00:00 2001 From: cevheri Date: Tue, 3 Mar 2026 23:51:06 +0300 Subject: [PATCH 01/20] feat(storage): implement pluggable storage layer with SQLite and PostgreSQL support, including migration from localStorage --- .env.example | 18 + CLAUDE.md | 28 +- Dockerfile | 10 +- bun.lock | 66 ++ docker-compose.yml | 8 + docs/ARCHITECTURE.md | 23 +- docs/STORAGE_ARCHITECTURE.md | 564 ++++++++++++++++++ docs/STORAGE_QUICK_SETUP.md | 254 ++++++++ package.json | 2 + src/app/api/storage/[collection]/route.ts | 47 ++ src/app/api/storage/config/route.ts | 12 + src/app/api/storage/migrate/route.ts | 32 + src/app/api/storage/route.ts | 27 + src/components/DataCharts.tsx | 32 +- src/components/Studio.tsx | 4 + src/components/admin/tabs/SecurityTab.tsx | 22 +- src/components/studio/BottomPanel.tsx | 7 +- src/hooks/use-storage-sync.ts | 235 ++++++++ src/lib/audit.ts | 21 +- src/lib/data-masking.ts | 30 +- src/lib/storage.ts | 191 ------ src/lib/storage/factory.ts | 84 +++ src/lib/storage/index.ts | 14 + src/lib/storage/local-storage.ts | 76 +++ src/lib/storage/providers/postgres.ts | 153 +++++ src/lib/storage/providers/sqlite.ts | 146 +++++ src/lib/storage/storage-facade.ts | 272 +++++++++ src/lib/storage/types.ts | 75 +++ src/proxy.ts | 7 +- tests/api/storage/config.test.ts | 42 ++ tests/api/storage/storage-routes.test.ts | 159 +++++ tests/components/DataCharts.test.tsx | 27 + tests/run-components.sh | 15 +- tests/unit/lib/storage/factory.test.ts | 71 +++ tests/unit/lib/storage/local-storage.test.ts | 70 +++ .../lib/storage/providers/postgres.test.ts | 134 +++++ .../unit/lib/storage/providers/sqlite.test.ts | 140 +++++ tests/unit/lib/storage/storage-facade.test.ts | 184 ++++++ 38 files changed, 3026 insertions(+), 276 deletions(-) create mode 100644 docs/STORAGE_ARCHITECTURE.md create mode 100644 docs/STORAGE_QUICK_SETUP.md create mode 100644 src/app/api/storage/[collection]/route.ts create mode 100644 
src/app/api/storage/config/route.ts create mode 100644 src/app/api/storage/migrate/route.ts create mode 100644 src/app/api/storage/route.ts create mode 100644 src/hooks/use-storage-sync.ts delete mode 100644 src/lib/storage.ts create mode 100644 src/lib/storage/factory.ts create mode 100644 src/lib/storage/index.ts create mode 100644 src/lib/storage/local-storage.ts create mode 100644 src/lib/storage/providers/postgres.ts create mode 100644 src/lib/storage/providers/sqlite.ts create mode 100644 src/lib/storage/storage-facade.ts create mode 100644 src/lib/storage/types.ts create mode 100644 tests/api/storage/config.test.ts create mode 100644 tests/api/storage/storage-routes.test.ts create mode 100644 tests/unit/lib/storage/factory.test.ts create mode 100644 tests/unit/lib/storage/local-storage.test.ts create mode 100644 tests/unit/lib/storage/providers/postgres.test.ts create mode 100644 tests/unit/lib/storage/providers/sqlite.test.ts create mode 100644 tests/unit/lib/storage/storage-facade.test.ts diff --git a/.env.example b/.env.example index b580f2d..82953e3 100644 --- a/.env.example +++ b/.env.example @@ -57,6 +57,24 @@ NEXT_PUBLIC_AUTH_PROVIDER=local # Okta: OIDC_ROLE_CLAIM=groups # Azure AD: OIDC_ROLE_CLAIM=roles +# ============================================ +# STORAGE PROVIDER (Optional) +# ============================================ +# Controls where application data is persisted. +# "local" (default) = browser localStorage only (zero config, great for dev) +# "sqlite" = SQLite file on server (persistent, single-node) +# "postgres" = PostgreSQL database (persistent, multi-node, enterprise) +# +# Note: NOT prefixed with NEXT_PUBLIC_ — server-side only, discovered at runtime +# via GET /api/storage/config endpoint. 
+STORAGE_PROVIDER=local + +# SQLite storage path (required when STORAGE_PROVIDER=sqlite) +# STORAGE_SQLITE_PATH=./data/libredb-storage.db + +# PostgreSQL connection URL (required when STORAGE_PROVIDER=postgres) +# STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb + # =========================================== # LLM Configuration (Strategy Pattern) # =========================================== diff --git a/CLAUDE.md b/CLAUDE.md index 033f599..1f76736 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,6 +6,11 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co LibreDB Studio is a web-based SQL IDE for cloud-native teams. It supports PostgreSQL, MySQL, SQLite, Oracle, SQL Server, MongoDB, Redis, and a demo mode with AI-powered query assistance. +## Github +* Repository: https://github.com/libredb/libredb-studio +* Container Registry: https://github.com/libredb/libredb-studio/pkgs/container/libredb-studio +* Docker Image: ghcr.io/libredb/libredb-studio:latest + ## Development Commands ```bash @@ -58,6 +63,7 @@ The project uses ESLint 9 for linting and `bun:test` for testing with `@testing- - **AI:** Multi-model support (Gemini, OpenAI, Ollama, Custom) - **Databases:** PostgreSQL (`pg`), MySQL (`mysql2`), SQLite (`better-sqlite3`), Oracle (`oracledb`), SQL Server (`mssql`), MongoDB (`mongodb`), Redis (`ioredis`) - **Auth:** JWT-based with `jose` library + OIDC SSO with `openid-client` (Auth0, Keycloak, Okta, Azure AD) +- **Storage:** Pluggable storage layer — localStorage (default), SQLite (`better-sqlite3`), or PostgreSQL (`pg`) ### Directory Structure @@ -69,6 +75,7 @@ src/ │ │ │ └── oidc/ # OIDC login + callback routes (PKCE, code exchange) │ │ ├── ai/ # AI endpoints (chat, nl2sql, explain, safety) │ │ ├── db/ # Query, schema, health, maintenance, transactions +│ │ ├── storage/ # Storage sync API (config, CRUD, migrate) │ │ └── admin/ # Fleet health, audit endpoints │ ├── admin/ # Admin dashboard (RBAC protected) │ └── login/ # 
Login page @@ -83,6 +90,15 @@ src/ │ └── ui/ # Shadcn/UI primitives ├── hooks/ # Custom React hooks └── lib/ + ├── storage/ # Storage abstraction layer + │ ├── index.ts # Barrel export + │ ├── types.ts # StorageData, ServerStorageProvider interfaces + │ ├── storage-facade.ts # Public sync API + CustomEvent dispatch + │ ├── local-storage.ts # Pure localStorage CRUD + │ ├── factory.ts # Env-based provider factory (singleton) + │ └── providers/ + │ ├── sqlite.ts # better-sqlite3 backend + │ └── postgres.ts # pg backend ├── db/ # Database provider module (Strategy Pattern) │ ├── providers/ │ │ ├── sql/ # SQL providers (postgres, mysql, sqlite, oracle, mssql) @@ -133,7 +149,12 @@ e2e/ # Playwright E2E tests (browser) 4. **API Routes:** All backend logic in `src/app/api/`. Protected routes require valid JWT. Public routes: `/login`, `/api/auth`, `/api/db/health` -5. **Client State:** LocalStorage for connections, query history, and saved queries (`src/lib/storage.ts`) +5. **Storage Abstraction:** `src/lib/storage/` module provides pluggable persistence: + - **Local** (default): Browser localStorage, zero config + - **SQLite**: `better-sqlite3` file DB for single-node persistent storage + - **PostgreSQL**: `pg` for multi-node enterprise storage + - Write-through cache: localStorage always serves reads; `useStorageSync` hook pushes mutations to server (debounced) + - Controlled by `STORAGE_PROVIDER` env var (server-side only, discovered at runtime via `/api/storage/config`) 6. **Multi-Tab Workspace:** Each query tab has independent state (query, results, execution status) @@ -164,6 +185,11 @@ LLM_PROVIDER=gemini # gemini, openai, ollama, custom LLM_API_KEY= LLM_MODEL=gemini-2.5-flash LLM_API_URL= # For ollama/custom providers + +# Optional storage config (server-side only, not NEXT_PUBLIC_) +STORAGE_PROVIDER=local # local (default) | sqlite | postgres +STORAGE_SQLITE_PATH=./data/libredb-storage.db # SQLite file path +STORAGE_POSTGRES_URL=postgresql://... 
# PostgreSQL connection URL ``` ### Path Aliases diff --git a/Dockerfile b/Dockerfile index 4cd08df..26b7514 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,14 +49,20 @@ ENV NODE_OPTIONS="--max-old-space-size=384" COPY --from=builder /usr/src/app/public ./public -# Set the correct permission for prerender cache -RUN mkdir -p .next +# Set the correct permission for prerender cache and storage +RUN mkdir -p .next data # Automatically leverage output traces to reduce image size # https://nextjs.org/docs/advanced-features/output-file-tracing COPY --from=builder /usr/src/app/.next/standalone ./ COPY --from=builder /usr/src/app/.next/static ./.next/static +# Copy better-sqlite3 native binding for server storage support +COPY --from=builder /usr/src/app/node_modules/better-sqlite3 ./node_modules/better-sqlite3 +COPY --from=builder /usr/src/app/node_modules/bindings ./node_modules/bindings +COPY --from=builder /usr/src/app/node_modules/file-uri-to-path ./node_modules/file-uri-to-path +COPY --from=builder /usr/src/app/node_modules/prebuild-install ./node_modules/prebuild-install + # Create non-root user for security RUN addgroup --system --gid 1001 nodejs && \ adduser --system --uid 1001 nextjs && \ diff --git a/bun.lock b/bun.lock index 6b5d1e2..2045ca7 100644 --- a/bun.lock +++ b/bun.lock @@ -37,6 +37,7 @@ "@tanstack/react-table": "^8.21.3", "@tanstack/react-virtual": "^3.13.13", "@xyflow/react": "^12.10.0", + "better-sqlite3": "^12.6.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "^1.1.1", @@ -78,6 +79,7 @@ "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.3.2", "@testing-library/user-event": "^14.6.1", + "@types/better-sqlite3": "^7.6.13", "@types/bun": "latest", "@types/node": "^20", "@types/pg": "^8.16.0", @@ -480,6 +482,8 @@ "@types/aria-query": ["@types/aria-query@5.0.4", "", {}, "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw=="], + "@types/better-sqlite3": 
["@types/better-sqlite3@7.6.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-NMv9ASNARoKksWtsq/SHakpYAYnhBrQgGD8zkLYk/jaK8jUGn08CfEdTRgYhMypUQAfzSP8W6gNLe0q19/t4VA=="], + "@types/bun": ["@types/bun@1.3.5", "", { "dependencies": { "bun-types": "1.3.5" } }, "sha512-RnygCqNrd3srIPEWBd5LFeUYG7plCoH2Yw9WaZGyNmdTEei+gWaHqydbaIRkIkcbXwhBT94q78QljxN0Sk838w=="], "@types/d3-array": ["@types/d3-array@3.2.2", "", {}, "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw=="], @@ -660,6 +664,10 @@ "bcrypt-pbkdf": ["bcrypt-pbkdf@1.0.2", "", { "dependencies": { "tweetnacl": "^0.14.3" } }, "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w=="], + "better-sqlite3": ["better-sqlite3@12.6.2", "", { "dependencies": { "bindings": "^1.5.0", "prebuild-install": "^7.1.1" } }, "sha512-8VYKM3MjCa9WcaSAI3hzwhmyHVlH8tiGFwf0RlTsZPWJ1I5MkzjiudCo4KC4DxOaL/53A5B1sI/IbldNFDbsKA=="], + + "bindings": ["bindings@1.5.0", "", { "dependencies": { "file-uri-to-path": "1.0.0" } }, "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ=="], + "bl": ["bl@6.1.6", "", { "dependencies": { "@types/readable-stream": "^4.0.0", "buffer": "^6.0.3", "inherits": "^2.0.4", "readable-stream": "^4.2.0" } }, "sha512-jLsPgN/YSvPUg9UX0Kd73CXpm2Psg9FxMeCSXnk3WBO3CMT10JMwijubhGfHCnFu6TPn1ei3b975dxv7K2pWVg=="], "brace-expansion": ["brace-expansion@1.1.12", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg=="], @@ -692,6 +700,8 @@ "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + "chownr": ["chownr@1.1.4", "", {}, "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="], + 
"class-variance-authority": ["class-variance-authority@0.7.1", "", { "dependencies": { "clsx": "^2.1.1" } }, "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg=="], "classcat": ["classcat@5.0.5", "", {}, "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w=="], @@ -772,6 +782,10 @@ "decimal.js-light": ["decimal.js-light@2.5.1", "", {}, "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg=="], + "decompress-response": ["decompress-response@6.0.0", "", { "dependencies": { "mimic-response": "^3.1.0" } }, "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ=="], + + "deep-extend": ["deep-extend@0.6.0", "", {}, "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="], + "deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], "default-browser": ["default-browser@5.5.0", "", { "dependencies": { "bundle-name": "^4.1.0", "default-browser-id": "^5.0.0" } }, "sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw=="], @@ -818,6 +832,8 @@ "emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + "end-of-stream": ["end-of-stream@1.4.5", "", { "dependencies": { "once": "^1.4.0" } }, "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg=="], + "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], @@ -880,6 +896,8 @@ "events": 
["events@3.3.0", "", {}, "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="], + "expand-template": ["expand-template@2.0.3", "", {}, "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg=="], + "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], "fast-equals": ["fast-equals@5.4.0", "", {}, "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw=="], @@ -896,6 +914,8 @@ "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], + "file-uri-to-path": ["file-uri-to-path@1.0.0", "", {}, "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw=="], + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], @@ -908,6 +928,8 @@ "framer-motion": ["framer-motion@12.23.26", "", { "dependencies": { "motion-dom": "^12.23.23", "motion-utils": "^12.23.6", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA=="], + "fs-constants": ["fs-constants@1.0.0", "", {}, "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="], + "fsevents": ["fsevents@2.3.2", "", { 
"os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], @@ -932,6 +954,8 @@ "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], + "github-from-package": ["github-from-package@0.0.0", "", {}, "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="], + "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], "globals": ["globals@14.0.0", "", {}, "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], @@ -982,6 +1006,8 @@ "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + "ini": ["ini@1.3.8", "", {}, "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="], + "input-otp": ["input-otp@1.4.2", "", { "peerDependencies": { "react": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-l3jWwYNvrEa6NTCt7BECfCm48GvwuZzkoeG3gBL2w4CHeOXW3eKFmf9UNYkNfYc3mxMrthMnxjIE07MT0zLBQA=="], "internal-slot": ["internal-slot@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw=="], @@ -1164,12 +1190,16 @@ "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + "mimic-response": ["mimic-response@3.1.0", "", {}, "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ=="], + "min-indent": ["min-indent@1.0.1", "", {}, "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg=="], "minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="], "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], + "mkdirp-classic": ["mkdirp-classic@0.5.3", "", {}, "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="], + "monaco-editor": ["monaco-editor@0.55.1", "", { "dependencies": { "dompurify": "3.2.7", "marked": "14.0.0" } }, "sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A=="], "mongodb": ["mongodb@7.0.0", "", { "dependencies": { "@mongodb-js/saslprep": "^1.3.0", "bson": "^7.0.0", "mongodb-connection-string-url": "^7.0.0" }, "peerDependencies": { "@aws-sdk/credential-providers": "^3.806.0", "@mongodb-js/zstd": "^7.0.0", "gcp-metadata": "^7.0.1", "kerberos": "^7.0.0", "mongodb-client-encryption": ">=7.0.0 <7.1.0", "snappy": "^7.3.2", "socks": "^2.8.6" }, "optionalPeers": ["@aws-sdk/credential-providers", "@mongodb-js/zstd", "gcp-metadata", "kerberos", "mongodb-client-encryption", "snappy", "socks"] }, "sha512-vG/A5cQrvGGvZm2mTnCSz1LUcbOPl83hfB6bxULKQ8oFZauyox/2xbZOoGNl+64m8VBrETkdGCDBdOsCr3F3jg=="], @@ -1194,6 +1224,8 @@ "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], + "napi-build-utils": ["napi-build-utils@2.0.0", "", {}, 
"sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA=="], + "napi-postinstall": ["napi-postinstall@0.3.4", "", { "bin": { "napi-postinstall": "lib/cli.js" } }, "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ=="], "native-duplexpair": ["native-duplexpair@1.0.0", "", {}, "sha512-E7QQoM+3jvNtlmyfqRZ0/U75VFgCls+fSkbml2MpgWkWyz3ox8Y58gNhfuziuQYGNNQAbFZJQck55LHCnCK6CA=="], @@ -1206,6 +1238,8 @@ "next-themes": ["next-themes@0.4.6", "", { "peerDependencies": { "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA=="], + "node-abi": ["node-abi@3.87.0", "", { "dependencies": { "semver": "^7.3.5" } }, "sha512-+CGM1L1CgmtheLcBuleyYOn7NWPVu0s0EJH2C4puxgEZb9h8QpR9G2dBfZJOAUhi7VQxuBPMd0hiISWcTyiYyQ=="], + "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], "oauth4webapi": ["oauth4webapi@3.8.5", "", {}, "sha512-A8jmyUckVhRJj5lspguklcl90Ydqk61H3dcU0oLhH3Yv13KpAliKTt5hknpGGPZSSfOwGyraNEFmofDYH+1kSg=="], @@ -1226,6 +1260,8 @@ "object.values": ["object.values@1.2.1", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA=="], + "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], + "open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="], "openid-client": 
["openid-client@6.8.2", "", { "dependencies": { "jose": "^6.1.3", "oauth4webapi": "^3.8.4" } }, "sha512-uOvTCndr4udZsKihJ68H9bUICrriHdUVJ6Az+4Ns6cW55rwM5h0bjVIzDz2SxgOI84LKjFyjOFvERLzdTUROGA=="], @@ -1284,6 +1320,8 @@ "postgres-interval": ["postgres-interval@1.2.0", "", { "dependencies": { "xtend": "^4.0.0" } }, "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ=="], + "prebuild-install": ["prebuild-install@7.1.3", "", { "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", "napi-build-utils": "^2.0.0", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", "simple-get": "^4.0.0", "tar-fs": "^2.0.0", "tunnel-agent": "^0.6.0" }, "bin": { "prebuild-install": "bin.js" } }, "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug=="], + "prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], @@ -1292,6 +1330,8 @@ "prop-types": ["prop-types@15.8.1", "", { "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg=="], + "pump": ["pump@3.0.4", "", { "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA=="], + "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], "queue-microtask": ["queue-microtask@1.2.3", "", {}, 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], @@ -1300,6 +1340,8 @@ "randexp": ["randexp@0.4.6", "", { "dependencies": { "discontinuous-range": "1.0.0", "ret": "~0.1.10" } }, "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ=="], + "rc": ["rc@1.2.8", "", { "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" }, "bin": { "rc": "./cli.js" } }, "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw=="], + "react": ["react@19.2.4", "", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="], "react-day-picker": ["react-day-picker@9.13.0", "", { "dependencies": { "@date-fns/tz": "^1.4.1", "date-fns": "^4.1.0", "date-fns-jalali": "^4.1.0-0" }, "peerDependencies": { "react": ">=16.8.0" } }, "sha512-euzj5Hlq+lOHqI53NiuNhCP8HWgsPf/bBAVijR50hNaY1XwjKjShAnIe8jm8RD2W9IJUvihDIZ+KrmqfFzNhFQ=="], @@ -1388,6 +1430,10 @@ "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], + "simple-concat": ["simple-concat@1.0.1", "", {}, "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q=="], + + "simple-get": ["simple-get@4.0.1", "", { "dependencies": { "decompress-response": "^6.0.0", "once": "^1.3.1", "simple-concat": "^1.0.0" } }, "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA=="], + "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, 
"sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], @@ -1444,6 +1490,10 @@ "tapable": ["tapable@2.3.0", "", {}, "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg=="], + "tar-fs": ["tar-fs@2.1.4", "", { "dependencies": { "chownr": "^1.1.1", "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", "tar-stream": "^2.1.4" } }, "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ=="], + + "tar-stream": ["tar-stream@2.2.0", "", { "dependencies": { "bl": "^4.0.3", "end-of-stream": "^1.4.1", "fs-constants": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^3.1.1" } }, "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ=="], + "tarn": ["tarn@3.0.2", "", {}, "sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ=="], "tedious": ["tedious@19.2.1", "", { "dependencies": { "@azure/core-auth": "^1.7.2", "@azure/identity": "^4.2.1", "@azure/keyvault-keys": "^4.4.0", "@js-joda/core": "^5.6.5", "@types/node": ">=18", "bl": "^6.1.4", "iconv-lite": "^0.7.0", "js-md4": "^0.3.2", "native-duplexpair": "^1.0.0", "sprintf-js": "^1.1.3" } }, "sha512-pk1Q16Yl62iocuQB+RWbg6rFUFkIyzqOFQ6NfysCltRvQqKwfurgj8v/f2X+CKvDhSL4IJ0cCOfCHDg9PWEEYA=="], @@ -1464,6 +1514,8 @@ "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + "tunnel-agent": ["tunnel-agent@0.6.0", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w=="], + "tw-animate-css": ["tw-animate-css@1.4.0", "", {}, "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ=="], 
"tweetnacl": ["tweetnacl@0.14.5", "", {}, "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="], @@ -1498,6 +1550,8 @@ "use-sync-external-store": ["use-sync-external-store@1.6.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w=="], + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + "utrie": ["utrie@1.0.2", "", { "dependencies": { "base64-arraybuffer": "^1.0.2" } }, "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw=="], "uuid": ["uuid@8.3.2", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="], @@ -1524,6 +1578,8 @@ "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], + "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], + "ws": ["ws@8.19.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg=="], "wsl-utils": ["wsl-utils@0.1.0", "", { "dependencies": { "is-wsl": "^3.1.0" } }, "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw=="], @@ -1626,20 +1682,30 @@ "next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], + "node-abi/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, 
"sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "pretty-format/ansi-styles": ["ansi-styles@5.2.0", "", {}, "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA=="], "pretty-format/react-is": ["react-is@17.0.2", "", {}, "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w=="], "prop-types/react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="], + "rc/strip-json-comments": ["strip-json-comments@2.0.1", "", {}, "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ=="], + "sharp/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "tar-stream/bl": ["bl@4.1.0", "", { "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w=="], + + "tar-stream/readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + "@types/ssh2/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="], "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@5.0.4", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg=="], "@typescript-eslint/utils/@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, 
"sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], + "tar-stream/bl/buffer": ["buffer@5.7.1", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ=="], + "@typescript-eslint/typescript-estree/minimatch/brace-expansion/balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], } } diff --git a/docker-compose.yml b/docker-compose.yml index cdbf501..449369e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,4 +10,12 @@ services: - LLM_API_KEY=${LLM_API_KEY} - LLM_MODEL=${LLM_MODEL:-gemini-2.5-flash} - LLM_API_URL=${LLM_API_URL} + - STORAGE_PROVIDER=${STORAGE_PROVIDER:-local} + - STORAGE_SQLITE_PATH=${STORAGE_SQLITE_PATH:-/app/data/libredb-storage.db} + - STORAGE_POSTGRES_URL=${STORAGE_POSTGRES_URL} + volumes: + - storage-data:/app/data restart: always + +volumes: + storage-data: diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index e379016..cf8205e 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -150,9 +150,20 @@ Controlled by `NEXT_PUBLIC_AUTH_PROVIDER` (`local` | `oidc`). Both flows result Multi-statement queries execute sequentially via `POST /api/db/multi-query`. -### 4.4. Client State Management +### 4.4. 
Storage Abstraction Layer -- **LocalStorage** for persistent data: connections, query history, saved queries, schema snapshots, chart configs, masking config +- **Write-through cache architecture**: localStorage (L1 cache) + optional server storage (L2 persistent) +- **Three storage modes** controlled by `STORAGE_PROVIDER` env var: + - `local` (default): Browser localStorage only, zero configuration + - `sqlite`: Server-side SQLite file via `better-sqlite3` + - `postgres`: Server-side PostgreSQL via `pg` +- **`useStorageSync` hook** in Studio.tsx: discovers mode at runtime via `/api/storage/config`, pulls on mount, pushes mutations (debounced 500ms) +- **Migration**: First login auto-migrates localStorage to server; `libredb_server_migrated` flag prevents re-migration +- **Graceful degradation**: If server unreachable, localStorage continues working + +### 4.5. Client State Management + +- **Storage module** (`src/lib/storage/`) for persistent data: connections, query history, saved queries, schema snapshots, chart configs, audit log, masking config, threshold config - **React hooks** for UI state: tabs, active connection, execution status - **Custom hooks** extracted from Studio.tsx: `useAuth`, `useConnectionManager`, `useTabManager`, `useTransactionControl`, `useQueryExecution`, `useInlineEditing` @@ -165,6 +176,7 @@ src/ │ │ ├── auth/ # Login/logout/me + OIDC (PKCE, callback) │ │ ├── ai/ # Chat, NL2SQL, explain, safety │ │ ├── db/ # Query, schema, health, maintenance, transactions +│ │ ├── storage/ # Storage sync API (config, CRUD, migrate) │ │ └── admin/ # Fleet health, audit │ ├── admin/ # Admin dashboard (RBAC protected) │ └── login/ # Login page @@ -192,7 +204,12 @@ src/ ├── ssh/ # SSH tunnel support ├── auth.ts # JWT utilities ├── oidc.ts # OIDC utilities - └── storage.ts # LocalStorage management + └── storage/ # Storage abstraction layer + ├── index.ts # Barrel export + ├── storage-facade.ts # Public sync API + CustomEvent dispatch + ├── local-storage.ts 
# Pure localStorage CRUD + ├── factory.ts # Env-based provider factory + └── providers/ # SQLite + PostgreSQL backends ``` ## 6. Deployment diff --git a/docs/STORAGE_ARCHITECTURE.md b/docs/STORAGE_ARCHITECTURE.md new file mode 100644 index 0000000..6ed961d --- /dev/null +++ b/docs/STORAGE_ARCHITECTURE.md @@ -0,0 +1,564 @@ +# Storage Architecture — LibreDB Studio + +This document describes the **Storage Abstraction Layer**, a pluggable persistence system that allows LibreDB Studio to operate in two modes: + +- **Local mode** (default): Zero-config, all data lives in the browser's `localStorage`. Ideal for single-user / open-source usage. +- **Server mode**: Data is persisted to a server-side database (SQLite or PostgreSQL) with per-user scoping. Ideal for teams and enterprise deployments. + +Switching between modes requires **only one environment variable** — no code changes, no rebuild. + +--- + +## Table of Contents + +1. [Design Goals](#1-design-goals) +2. [Architecture Overview](#2-architecture-overview) +3. [Data Model](#3-data-model) +4. [Module Structure](#4-module-structure) +5. [Local Storage Layer](#5-local-storage-layer) +6. [Storage Facade](#6-storage-facade) +7. [Server Storage Providers](#7-server-storage-providers) +8. [API Routes](#8-api-routes) +9. [Write-Through Cache & Sync Hook](#9-write-through-cache--sync-hook) +10. [Migration Flow](#10-migration-flow) +11. [Configuration](#11-configuration) +12. [User Scoping & Security](#12-user-scoping--security) +13. [Docker Deployment](#13-docker-deployment) +14. [Adding a New Provider](#14-adding-a-new-provider) + +--- + +## 1. 
Design Goals + +| Goal | Approach | +|------|----------| +| **Zero breaking changes** | All 16+ consumer components keep the same synchronous `storage.*` API | +| **Zero-config default** | `localStorage` works out of the box — no database, no env vars needed | +| **Single image, all modes** | Runtime config via env var, not build-time `NEXT_PUBLIC_*` | +| **Per-user isolation** | Server storage scoped by JWT `username` — no cross-user leaks | +| **Graceful degradation** | If server is unreachable, `localStorage` continues to work | +| **Extensible** | Adding a new backend (e.g., MySQL, DynamoDB) requires one file implementing `ServerStorageProvider` | + +--- + +## 2. Architecture Overview + +``` +┌──────────────────────────────┐ +│ 16+ Consumer Components │ ← Unchanged, same sync API +│ storage.getConnections() │ +│ storage.saveConnection() │ +└──────────────┬───────────────┘ + │ sync read/write +┌──────────────▼───────────────┐ +│ Storage Facade │ ← localStorage read/write + CustomEvent dispatch +│ src/lib/storage/ │ +│ storage-facade.ts │ +└──────────────┬───────────────┘ + │ CustomEvent: 'libredb-storage-change' +┌──────────────▼───────────────┐ +│ useStorageSync Hook │ ← Mounted in Studio.tsx (server mode only) +│ src/hooks/ │ +│ use-storage-sync.ts │ +└──────────────┬───────────────┘ + │ fetch (debounced 500ms) +┌──────────────▼───────────────┐ +│ API Routes │ ← JWT auth + user scoping +│ /api/storage/* │ +└──────────────┬───────────────┘ + │ +┌──────────────▼───────────────┐ +│ ServerStorageProvider │ ← Strategy Pattern +│ ┌─────────┐ ┌────────────┐ │ +│ │ SQLite │ │ PostgreSQL │ │ +│ └─────────┘ └────────────┘ │ +└──────────────────────────────┘ +``` + +**Key insight:** `localStorage` is always the **rendering source** (L1 cache). The server database is the **persistent source of truth** (L2). The sync hook keeps them in sync via a write-through cache pattern. + +--- + +## 3. 
Data Model + +### 3.1 Collections + +All application state is organized into **9 collections**, each stored as a JSON blob: + +| Collection | Type | Description | Max Items | +|-----------|------|-------------|-----------| +| `connections` | `DatabaseConnection[]` | Saved database connections | — | +| `history` | `QueryHistoryItem[]` | Query execution history | 500 | +| `saved_queries` | `SavedQuery[]` | User-saved SQL/JSON queries | — | +| `schema_snapshots` | `SchemaSnapshot[]` | Schema diff snapshots | 50 | +| `saved_charts` | `SavedChartConfig[]` | Saved chart configurations | — | +| `active_connection_id` | `string \| null` | Currently active connection | — | +| `audit_log` | `AuditEvent[]` | Audit trail events | 1000 | +| `masking_config` | `MaskingConfig` | Data masking rules and RBAC | — | +| `threshold_config` | `ThresholdConfig[]` | Monitoring alert thresholds | — | + +### 3.2 Server Database Schema + +Both SQLite and PostgreSQL use the same logical schema — a single table with collection-based JSON blobs: + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, -- JWT username (email) + collection TEXT NOT NULL, -- 'connections', 'history', etc. 
+ data TEXT NOT NULL, -- JSON serialized + updated_at TEXT/TIMESTAMPTZ NOT NULL, -- Last modification time + PRIMARY KEY (user_id, collection) +); +``` + +This design is intentionally simple: +- **No schema migrations** needed when adding new collections +- **One row per user per collection** — efficient upsert +- **JSON blobs** keep the server storage schema-agnostic + +### 3.3 localStorage Keys + +Each collection maps to a `libredb_`-prefixed localStorage key: + +``` +connections → libredb_connections +history → libredb_history +saved_queries → libredb_saved_queries +schema_snapshots → libredb_schema_snapshots +saved_charts → libredb_saved_charts +active_connection_id → libredb_active_connection_id +audit_log → libredb_audit_log +masking_config → libredb_masking_config +threshold_config → libredb_threshold_config +``` + +--- + +## 4. Module Structure + +``` +src/lib/storage/ +├── index.ts # Barrel export — preserves @/lib/storage import path +├── types.ts # StorageData, StorageCollection, ServerStorageProvider +├── local-storage.ts # Pure localStorage CRUD (SSR-safe) +├── storage-facade.ts # Public storage object with domain methods +├── factory.ts # Env-based provider instantiation (singleton) +└── providers/ + ├── sqlite.ts # better-sqlite3 implementation + └── postgres.ts # pg (Pool) implementation + +src/hooks/ +└── use-storage-sync.ts # Write-through cache hook + +src/app/api/storage/ +├── config/route.ts # GET: storage mode discovery (public) +├── route.ts # GET: fetch all user data (auth required) +├── [collection]/route.ts # PUT: update single collection (auth required) +└── migrate/route.ts # POST: localStorage → server migration (auth required) +``` + +--- + +## 5. 
Local Storage Layer + +**File:** `src/lib/storage/local-storage.ts` + +Pure, side-effect-free localStorage CRUD with SSR safety: + +```typescript +// All operations check isClient() before accessing localStorage +export function readJSON(collection: string): T | null; +export function writeJSON(collection: string, data: unknown): void; +export function readString(collection: string): string | null; +export function writeString(collection: string, value: string): void; +export function remove(collection: string): void; +export function getKey(collection: string): string; // → 'libredb_' + collection +``` + +- Every function is guarded by `isClient()` — safe to call during SSR (returns `null` / no-op) +- JSON parse failures return `null` instead of throwing + +--- + +## 6. Storage Facade + +**File:** `src/lib/storage/storage-facade.ts` + +The public `storage` object provides the same **synchronous API** that all 16+ consumer components use. Every mutation method: + +1. Writes to `localStorage` (immediate) +2. Dispatches a `CustomEvent('libredb-storage-change')` with the collection name and data + +```typescript +// Example: saving a connection +storage.saveConnection(conn); +// 1. Reads existing connections from localStorage +// 2. Upserts by ID +// 3. Writes back to localStorage +// 4. 
Dispatches CustomEvent({ collection: 'connections', data: updatedList }) +``` + +### Public API + +| Category | Methods | +|----------|---------| +| **Connections** | `getConnections()`, `saveConnection(conn)`, `deleteConnection(id)` | +| **History** | `getHistory()`, `addToHistory(item)`, `clearHistory()` | +| **Saved Queries** | `getSavedQueries()`, `saveQuery(query)`, `deleteSavedQuery(id)` | +| **Schema Snapshots** | `getSchemaSnapshots(connId?)`, `saveSchemaSnapshot(snap)`, `deleteSchemaSnapshot(id)` | +| **Charts** | `getSavedCharts()`, `saveChart(chart)`, `deleteChart(id)` | +| **Active Connection** | `getActiveConnectionId()`, `setActiveConnectionId(id)` | +| **Audit Log** | `getAuditLog()`, `saveAuditLog(events)` | +| **Masking Config** | `getMaskingConfig()`, `saveMaskingConfig(config)` | +| **Threshold Config** | `getThresholdConfig()`, `saveThresholdConfig(thresholds)` | + +All read methods are **synchronous** — they read from `localStorage` only. No network calls. + +--- + +## 7. 
Server Storage Providers + +### 7.1 Provider Interface + +**File:** `src/lib/storage/types.ts` + +```typescript +interface ServerStorageProvider { + initialize(): Promise; + getAllData(userId: string): Promise>; + getCollection( + userId: string, collection: K + ): Promise; + setCollection( + userId: string, collection: K, data: StorageData[K] + ): Promise; + mergeData(userId: string, data: Partial): Promise; + isHealthy(): Promise; + close(): Promise; +} +``` + +### 7.2 SQLite Provider + +**File:** `src/lib/storage/providers/sqlite.ts` +**Package:** `better-sqlite3` (Node.js compatible, not `bun:sqlite`) + +| Feature | Detail | +|---------|--------| +| **WAL mode** | Enabled for concurrent read performance | +| **Auto-create** | Directory and database file created on `initialize()` | +| **Upsert** | `INSERT OR REPLACE INTO user_storage` | +| **Transactions** | `mergeData()` wraps all inserts in a single transaction | +| **Health check** | `SELECT 1 AS ok` | + +```env +STORAGE_PROVIDER=sqlite +STORAGE_SQLITE_PATH=./data/libredb-storage.db # default +``` + +### 7.3 PostgreSQL Provider + +**File:** `src/lib/storage/providers/postgres.ts` +**Package:** `pg` (connection pool) + +| Feature | Detail | +|---------|--------| +| **Pool config** | max: 5, idleTimeoutMillis: 30000 | +| **Upsert** | `INSERT ... 
ON CONFLICT (user_id, collection) DO UPDATE` | +| **Transactions** | `mergeData()` uses `BEGIN`/`COMMIT`/`ROLLBACK` with client checkout | +| **Health check** | `SELECT 1 AS ok` | + +```env +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb +``` + +### 7.4 Factory + +**File:** `src/lib/storage/factory.ts` + +The factory uses the **Singleton pattern** — one provider instance per process, lazy-initialized on first access: + +```typescript +getStorageProviderType() // → 'local' | 'sqlite' | 'postgres' +isServerStorageEnabled() // → true if not 'local' +getStorageConfig() // → { provider, serverMode } +getStorageProvider() // → ServerStorageProvider | null (singleton) +closeStorageProvider() // → cleanup for testing +``` + +Provider classes are **dynamically imported** — SQLite and PostgreSQL dependencies are only loaded when their provider is selected. + +--- + +## 8. API Routes + +All routes (except `/config`) require JWT authentication. The authenticated user's `username` (email) is used as the `user_id` for storage scoping. + +| Endpoint | Method | Auth | Purpose | +|----------|--------|------|---------| +| `/api/storage/config` | GET | Public | Runtime storage mode discovery | +| `/api/storage` | GET | JWT | Fetch all collections for the authenticated user | +| `/api/storage/[collection]` | PUT | JWT | Update a single collection | +| `/api/storage/migrate` | POST | JWT | Merge localStorage dump into server storage | + +### Response Examples + +**GET /api/storage/config** +```json +{ "provider": "sqlite", "serverMode": true } +``` + +**GET /api/storage** +```json +{ + "connections": [{ "id": "c1", "name": "Prod DB", ... }], + "history": [{ "id": "h1", "query": "SELECT ...", ... }], + ... +} +``` + +**PUT /api/storage/connections** +```json +// Request: { "data": [{ "id": "c1", "name": "Prod DB", ... 
}] } +// Response: { "ok": true } +``` + +**POST /api/storage/migrate** +```json +// Request: { "connections": [...], "history": [...], ... } +// Response: { "ok": true, "migrated": ["connections", "history"] } +``` + +When `STORAGE_PROVIDER=local`, all data routes return `404 Not Found` (config route always works). + +--- + +## 9. Write-Through Cache & Sync Hook + +**File:** `src/hooks/use-storage-sync.ts` + +The hook is mounted in `Studio.tsx` after `useAuth()` and orchestrates all client-server synchronization. + +### Sync States + +```typescript +interface StorageSyncState { + isServerMode: boolean; // Server storage active? + isSyncing: boolean; // Currently transferring data? + lastSyncedAt: Date | null; // Last successful sync timestamp + syncError: string | null; // Last error message (null = healthy) +} +``` + +### Lifecycle + +``` +App Mount + │ + ├─ GET /api/storage/config + │ ├─ serverMode: false → done (localStorage only) + │ └─ serverMode: true ──┐ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Check libredb_server_migrated flag │ + │ │ ├─ Not migrated → POST /api/storage/migrate│ + │ │ │ (send all localStorage → server merge) │ + │ │ │ Set flag in localStorage │ + │ │ └─ Already migrated → skip │ + │ └──────────────────────┬──────────────────────┘ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Pull: GET /api/storage │ + │ │ → Write server data into localStorage │ + │ │ → Components re-render from localStorage │ + │ └──────────────────────┬──────────────────────┘ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Listen: 'libredb-storage-change' events │ + │ │ → Collect pending collections │ + │ │ → Debounce 500ms │ + │ │ → PUT /api/storage/[collection] for each │ + │ └─────────────────────────────────────────────┘ + │ + ▼ (ongoing) +``` + +### Push Behavior (Debounced) + +When any `storage.*` mutation fires: + +1. Facade writes to `localStorage` (immediate, synchronous) +2. 
Facade dispatches `CustomEvent('libredb-storage-change', { collection, data })` +3. Hook captures event, adds collection to pending set +4. After 500ms of no new mutations, hook flushes: + - Reads each pending collection from `localStorage` + - Sends `PUT /api/storage/[collection]` for each + +### Graceful Degradation + +- If `/api/storage/config` fails → stays in localStorage-only mode +- If push fails → logs warning, sets `syncError`, does **not** block the UI +- Components always read from `localStorage` — no loading states for storage + +--- + +## 10. Migration Flow + +When a user first enables server mode (or a new user logs in for the first time): + +``` +1. Hook detects serverMode = true +2. Checks localStorage('libredb_server_migrated') flag +3. If not migrated: + a. Reads all 9 collections from localStorage + b. POST /api/storage/migrate with full payload + c. Server calls provider.mergeData() — ID-based deduplication + d. Sets 'libredb_server_migrated' flag in localStorage +4. Pull: GET /api/storage → overwrite localStorage with server data +5. Subsequent mutations sync normally via push +``` + +This ensures existing localStorage data is preserved when transitioning to server mode. + +--- + +## 11. Configuration + +### Environment Variables + +| Variable | Default | Required | Description | +|----------|---------|----------|-------------| +| `STORAGE_PROVIDER` | `local` | No | Storage backend: `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | `./data/libredb-storage.db` | No | Path to SQLite database file | +| `STORAGE_POSTGRES_URL` | — | If `postgres` | PostgreSQL connection string | + +### Why Not `NEXT_PUBLIC_*`? + +Next.js `NEXT_PUBLIC_*` variables are **inlined at build time** as static strings. This means: +- Every storage mode would require a separate Docker build +- Cannot change storage mode without rebuilding + +Instead, the client discovers the storage mode at **runtime** via `GET /api/storage/config`. 
One Docker image supports all modes. + +--- + +## 12. User Scoping & Security + +### Per-User Isolation + +Every row in `user_storage` is scoped by `user_id`: + +``` +(admin@libredb.org, connections) → [{"id":"c1", "name":"Prod DB"...}] +(admin@libredb.org, history) → [{"id":"h1", "query":"SELECT..."...}] +(user@libredb.org, connections) → [{"id":"c2", "name":"Dev DB"...}] +``` + +- `user_id` = JWT session `username` (email address) +- **Client never sends `user_id`** — server always extracts from JWT cookie +- Every query includes `WHERE user_id = $username` — no cross-user access possible + +### Authentication + +- `/api/storage/config` is **public** — returns only `{ provider, serverMode }`, no sensitive data +- All other `/api/storage/*` routes require a valid JWT session via `getSession()` +- Unauthorized requests receive `401 Unauthorized` + +### OIDC Users + +OIDC users (Auth0, Keycloak, Okta, Azure AD) have their `preferred_username` or email claim mapped to the same `username` field used as `user_id`. + +--- + +## 13. Docker Deployment + +### SQLite Mode + +```yaml +# docker-compose.yml +services: + libredb-studio: + environment: + STORAGE_PROVIDER: sqlite + STORAGE_SQLITE_PATH: /app/data/libredb-storage.db + volumes: + - storage-data:/app/data + +volumes: + storage-data: +``` + +The Dockerfile includes `better-sqlite3` native bindings and creates the `/app/data` directory. + +### PostgreSQL Mode + +```yaml +services: + libredb-studio: + environment: + STORAGE_PROVIDER: postgres + STORAGE_POSTGRES_URL: postgresql://user:pass@db:5432/libredb + depends_on: + - db + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: libredb + POSTGRES_USER: user + POSTGRES_PASSWORD: pass +``` + +No volume needed on the app container — data lives in PostgreSQL. + +--- + +## 14. 
Adding a New Provider + +To add a new storage backend (e.g., MySQL, DynamoDB): + +### Step 1: Implement the Interface + +Create `src/lib/storage/providers/your-provider.ts`: + +```typescript +import type { ServerStorageProvider, StorageData, StorageCollection } from '../types'; + +export class YourStorageProvider implements ServerStorageProvider { + async initialize(): Promise { /* create table */ } + async getAllData(userId: string): Promise> { /* ... */ } + async getCollection( + userId: string, collection: K + ): Promise { /* ... */ } + async setCollection( + userId: string, collection: K, data: StorageData[K] + ): Promise { /* upsert */ } + async mergeData( + userId: string, data: Partial + ): Promise { /* batch upsert in transaction */ } + async isHealthy(): Promise { /* SELECT 1 */ } + async close(): Promise { /* cleanup */ } +} +``` + +### Step 2: Register in Factory + +Update `src/lib/storage/factory.ts`: + +```typescript +// Add to StorageProviderType +type StorageProviderType = 'local' | 'sqlite' | 'postgres' | 'your-provider'; + +// Add dynamic import in getStorageProvider() +case 'your-provider': { + const { YourStorageProvider } = await import('./providers/your-provider'); + instance = new YourStorageProvider(process.env.STORAGE_YOUR_URL!); + break; +} +``` + +### Step 3: Add Tests + +Create `tests/unit/lib/storage/providers/your-provider.test.ts` with mocked driver. + +That's it — no changes needed to the facade, API routes, sync hook, or any consumer components. diff --git a/docs/STORAGE_QUICK_SETUP.md b/docs/STORAGE_QUICK_SETUP.md new file mode 100644 index 0000000..255227a --- /dev/null +++ b/docs/STORAGE_QUICK_SETUP.md @@ -0,0 +1,254 @@ +# Storage Quick Setup Guide + +LibreDB Studio supports three storage modes. Pick the one that fits your use case and follow the steps below. + +> For a deep dive into the architecture, see [STORAGE_ARCHITECTURE.md](./STORAGE_ARCHITECTURE.md). + +--- + +## Which Mode Should I Use? 
+ +| Mode | Best For | Persistence | Multi-User | Setup | +|------|----------|-------------|------------|-------| +| **Local** (default) | Solo dev, quick start | Browser only | No | Zero config | +| **SQLite** | Small teams, single server | Server file | Yes | 1 env var | +| **PostgreSQL** | Enterprise, multi-node | External DB | Yes | 2 env vars | + +--- + +## 1. Local Mode (Default) + +No configuration needed. All data stays in the browser's `localStorage`. + +```bash +# Just start the app — that's it +bun dev +``` + +**What you get:** +- Instant start, no database required +- Data persists across page reloads +- Data is lost if browser storage is cleared or you switch browsers/devices + +**When to move on:** When you need data to survive across devices, browsers, or team members. + +--- + +## 2. SQLite Mode + +A single file on the server. Great for self-hosted single-node deployments. + +### Local Development + +```bash +# .env.local +STORAGE_PROVIDER=sqlite +STORAGE_SQLITE_PATH=./data/libredb-storage.db +``` + +```bash +bun dev +``` + +The database file and directory are created automatically on first request. + +### Docker + +```yaml +# docker-compose.yml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + ports: + - "3000:3000" + environment: + - STORAGE_PROVIDER=sqlite + - STORAGE_SQLITE_PATH=/app/data/libredb-storage.db + volumes: + - storage-data:/app/data + +volumes: + storage-data: +``` + +```bash +docker-compose up -d +``` + +> **Volume is essential.** Without it, data is lost when the container restarts. + +### Verify + +```bash +curl http://localhost:3000/api/storage/config +# → {"provider":"sqlite","serverMode":true} +``` + +--- + +## 3. PostgreSQL Mode + +Recommended for production, teams, and high-availability deployments. 
+ +### Local Development + +```bash +# Start a PostgreSQL instance (if you don't have one) +docker run -d --name libredb-pg \ + -e POSTGRES_DB=libredb \ + -e POSTGRES_USER=libredb \ + -e POSTGRES_PASSWORD=secret \ + -p 5432:5432 \ + postgres:16-alpine +``` + +```bash +# .env.local +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://libredb:secret@localhost:5432/libredb +``` + +```bash +bun dev +``` + +The `user_storage` table is created automatically on first request. + +### Docker Compose (App + PostgreSQL) + +```yaml +# docker-compose.yml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + ports: + - "3000:3000" + environment: + - STORAGE_PROVIDER=postgres + - STORAGE_POSTGRES_URL=postgresql://libredb:secret@db:5432/libredb + depends_on: + db: + condition: service_healthy + + db: + image: postgres:16-alpine + environment: + - POSTGRES_DB=libredb + - POSTGRES_USER=libredb + - POSTGRES_PASSWORD=secret + volumes: + - pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U libredb"] + interval: 5s + timeout: 3s + retries: 5 + +volumes: + pgdata: +``` + +```bash +docker-compose up -d +``` + +### Using an Existing PostgreSQL + +Just set the connection string — no special schema setup needed: + +```bash +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://user:pass@your-pg-host:5432/your_db +``` + +The required table is auto-created on startup. The user needs `CREATE TABLE` and `INSERT`/`UPDATE`/`SELECT` privileges. + +### Verify + +```bash +curl http://localhost:3000/api/storage/config +# → {"provider":"postgres","serverMode":true} +``` + +--- + +## Migration: Local to Server + +When you switch from local mode to SQLite or PostgreSQL, **existing browser data is automatically migrated** on first login: + +1. User opens the app in server mode +2. The sync hook detects it's the first time (no `libredb_server_migrated` flag) +3. All localStorage data is sent to the server via `POST /api/storage/migrate` +4. 
Server merges the data (ID-based deduplication — no duplicates) +5. A flag is set in localStorage to prevent re-migration +6. From this point on, the server is the source of truth + +**No manual steps required.** Just change the env var and restart. + +> If multiple users were sharing a browser in local mode, only the data from the user who migrates first will be sent. Each user's server storage is isolated by their login email. + +--- + +## Environment Variables Reference + +| Variable | Default | Description | +|----------|---------|-------------| +| `STORAGE_PROVIDER` | `local` | `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | `./data/libredb-storage.db` | Path to SQLite file (sqlite mode) | +| `STORAGE_POSTGRES_URL` | — | PostgreSQL connection string (postgres mode) | + +> These are **server-side only** variables (no `NEXT_PUBLIC_` prefix). The client discovers the mode at runtime via `GET /api/storage/config`. This means one Docker image works for all modes. + +--- + +## Health Check + +Check if the storage backend is reachable: + +```bash +# Storage mode info (always works, no auth needed) +curl http://localhost:3000/api/storage/config + +# Full data fetch (requires auth cookie) +curl -b cookies.txt http://localhost:3000/api/storage +``` + +--- + +## Troubleshooting + +### "Data not syncing to server" + +1. Check storage mode: `curl http://localhost:3000/api/storage/config` +2. Make sure the response shows `"serverMode": true` +3. 
Check browser console for sync errors (look for `[StorageSync]` prefixed logs) + +### SQLite: "SQLITE_CANTOPEN" + +- The directory in `STORAGE_SQLITE_PATH` must be writable by the app process +- In Docker, make sure the volume is mounted correctly + +### PostgreSQL: "Connection refused" + +- Verify `STORAGE_POSTGRES_URL` is correct and the database is reachable +- In Docker Compose, use the service name (`db`) as the host, not `localhost` +- Check that the PostgreSQL container is healthy: `docker-compose ps` + +### "Data disappeared after switching modes" + +- Switching from server mode **back** to local mode doesn't pull data from the server +- Local mode only reads from localStorage +- To recover: switch back to server mode, the data is still in the database + +### "Duplicate data after migration" + +- Migration uses ID-based deduplication — this shouldn't happen +- If it does, check if the same user logged in from multiple browsers before migration completed + +--- + +## What's Next? 
+ +- [STORAGE_ARCHITECTURE.md](./STORAGE_ARCHITECTURE.md) — Deep dive into the write-through cache, sync hook, and provider internals +- [ARCHITECTURE.md](./ARCHITECTURE.md) — Overall system architecture +- [OIDC_SETUP.md](./OIDC_SETUP.md) — SSO configuration (pairs well with server storage for team deployments) diff --git a/package.json b/package.json index 152daef..00dc987 100644 --- a/package.json +++ b/package.json @@ -53,6 +53,7 @@ "@tanstack/react-table": "^8.21.3", "@tanstack/react-virtual": "^3.13.13", "@xyflow/react": "^12.10.0", + "better-sqlite3": "^12.6.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "^1.1.1", @@ -94,6 +95,7 @@ "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.3.2", "@testing-library/user-event": "^14.6.1", + "@types/better-sqlite3": "^7.6.13", "@types/bun": "latest", "@types/node": "^20", "@types/pg": "^8.16.0", diff --git a/src/app/api/storage/[collection]/route.ts b/src/app/api/storage/[collection]/route.ts new file mode 100644 index 0000000..da1c028 --- /dev/null +++ b/src/app/api/storage/[collection]/route.ts @@ -0,0 +1,47 @@ +/** + * PUT /api/storage/[collection] + * Updates a single storage collection for the authenticated user. + * Only works when server storage is enabled. 
+ */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; +import { STORAGE_COLLECTIONS, type StorageCollection } from '@/lib/storage/types'; + +export async function PUT( + request: NextRequest, + { params }: { params: Promise<{ collection: string }> } +) { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const { collection } = await params; + + if (!STORAGE_COLLECTIONS.includes(collection as StorageCollection)) { + return NextResponse.json( + { error: `Invalid collection: ${collection}` }, + { status: 400 } + ); + } + + const body = await request.json(); + + await provider.setCollection( + session.username, + collection as StorageCollection, + body.data + ); + + return NextResponse.json({ ok: true }); +} diff --git a/src/app/api/storage/config/route.ts b/src/app/api/storage/config/route.ts new file mode 100644 index 0000000..d3e86cf --- /dev/null +++ b/src/app/api/storage/config/route.ts @@ -0,0 +1,12 @@ +/** + * GET /api/storage/config + * Returns storage configuration (public endpoint, no auth required). + * Client uses this to discover if server-side storage is enabled at runtime. + */ + +import { NextResponse } from 'next/server'; +import { getStorageConfig } from '@/lib/storage/factory'; + +export async function GET() { + return NextResponse.json(getStorageConfig()); +} diff --git a/src/app/api/storage/migrate/route.ts b/src/app/api/storage/migrate/route.ts new file mode 100644 index 0000000..59cf71b --- /dev/null +++ b/src/app/api/storage/migrate/route.ts @@ -0,0 +1,32 @@ +/** + * POST /api/storage/migrate + * Migrates localStorage data to server storage. 
+ * Client sends all its localStorage collections; server merges them. + * Only works when server storage is enabled. + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; +import type { StorageData } from '@/lib/storage/types'; + +export async function POST(request: NextRequest) { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const body = (await request.json()) as Partial; + + await provider.mergeData(session.username, body); + + return NextResponse.json({ ok: true, migrated: Object.keys(body) }); +} diff --git a/src/app/api/storage/route.ts b/src/app/api/storage/route.ts new file mode 100644 index 0000000..812e255 --- /dev/null +++ b/src/app/api/storage/route.ts @@ -0,0 +1,27 @@ +/** + * GET /api/storage + * Returns all storage data for the authenticated user. + * Only works when server storage is enabled. 
+ */ + +import { NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; + +export async function GET() { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const data = await provider.getAllData(session.username); + return NextResponse.json(data); +} diff --git a/src/components/DataCharts.tsx b/src/components/DataCharts.tsx index 50e260a..4e9d387 100644 --- a/src/components/DataCharts.tsx +++ b/src/components/DataCharts.tsx @@ -55,6 +55,7 @@ import { SelectTrigger, SelectValue, } from '@/components/ui/select'; +import { storage } from '@/lib/storage'; // Chart colors matching CSS variables const CHART_COLORS = [ @@ -320,12 +321,20 @@ export function DataCharts({ result }: DataChartsProps) { const [showSaveDialog, setShowSaveDialog] = useState(false); const [saveName, setSaveName] = useState(''); - // Load saved charts from localStorage + // Load saved charts from storage React.useEffect(() => { - try { - const stored = localStorage.getItem('libredb_saved_charts'); - if (stored) setSavedCharts(JSON.parse(stored)); - } catch { /* ignore */ } + const charts = storage.getSavedCharts(); + if (charts.length > 0) { + setSavedCharts(charts.map(c => ({ + id: c.id, + name: c.name, + chartType: c.chartType as ChartType, + xAxis: c.xAxis, + yAxis: c.yAxis, + aggregation: (c.aggregation || 'none') as AggregationType, + dateGrouping: c.dateGrouping || '', + }))); + } }, []); // Initialize axis selections when analysis changes @@ -411,7 +420,16 @@ export function DataCharts({ result }: DataChartsProps) { }; const updated = [...savedCharts, newChart]; setSavedCharts(updated); - localStorage.setItem('libredb_saved_charts', JSON.stringify(updated)); + 
storage.saveChart({ + id: newChart.id, + name: newChart.name, + chartType: newChart.chartType, + xAxis: newChart.xAxis, + yAxis: newChart.yAxis, + aggregation: newChart.aggregation, + dateGrouping: (newChart.dateGrouping || undefined) as DateGrouping | undefined, + createdAt: new Date(), + }); setShowSaveDialog(false); setSaveName(''); }, [saveName, chartType, xAxis, yAxis, aggregation, dateGrouping, savedCharts]); @@ -429,7 +447,7 @@ export function DataCharts({ result }: DataChartsProps) { const deleteSavedChart = useCallback((id: string) => { const updated = savedCharts.filter(c => c.id !== id); setSavedCharts(updated); - localStorage.setItem('libredb_saved_charts', JSON.stringify(updated)); + storage.deleteChart(id); }, [savedCharts]); const exportChart = useCallback(async (format: 'png' | 'svg') => { diff --git a/src/components/Studio.tsx b/src/components/Studio.tsx index 5649de0..fc7afe7 100644 --- a/src/components/Studio.tsx +++ b/src/components/Studio.tsx @@ -31,6 +31,7 @@ import { useTabManager } from '@/hooks/use-tab-manager'; import { useTransactionControl } from '@/hooks/use-transaction-control'; import { useQueryExecution } from '@/hooks/use-query-execution'; import { useInlineEditing } from '@/hooks/use-inline-editing'; +import { useStorageSync } from '@/hooks/use-storage-sync'; import { storage } from '@/lib/storage'; import { getRandomShowcaseQuery } from '@/lib/showcase-queries'; import { @@ -65,6 +66,9 @@ export default function Studio() { // 1. Auth const { user, isAdmin, handleLogout } = useAuth(); + // 1.5. Storage sync (write-through cache for server mode) + useStorageSync(); + // 2. 
Connection Manager + Provider Metadata const conn = useConnectionManager(); const { metadata } = useProviderMetadata(conn.activeConnection); diff --git a/src/components/admin/tabs/SecurityTab.tsx b/src/components/admin/tabs/SecurityTab.tsx index ac5c04e..d673d02 100644 --- a/src/components/admin/tabs/SecurityTab.tsx +++ b/src/components/admin/tabs/SecurityTab.tsx @@ -19,23 +19,9 @@ import { DEFAULT_THRESHOLDS, type ThresholdConfig, } from '@/lib/monitoring-thresholds'; +import { storage } from '@/lib/storage'; import { toast } from 'sonner'; -const THRESHOLD_STORAGE_KEY = 'libredb_threshold_config'; - -function loadThresholds(): ThresholdConfig[] { - if (typeof window === 'undefined') return DEFAULT_THRESHOLDS; - try { - const stored = localStorage.getItem(THRESHOLD_STORAGE_KEY); - if (stored) return JSON.parse(stored); - } catch { /* ignore */ } - return DEFAULT_THRESHOLDS; -} - -function saveThresholds(thresholds: ThresholdConfig[]) { - localStorage.setItem(THRESHOLD_STORAGE_KEY, JSON.stringify(thresholds)); -} - export function SecurityTab() { return (
@@ -151,7 +137,7 @@ function ThresholdSettings() { const [hasChanges, setHasChanges] = useState(false); useEffect(() => { - setThresholds(loadThresholds()); + setThresholds(storage.getThresholdConfig()); }, []); const updateThreshold = ( @@ -168,14 +154,14 @@ function ThresholdSettings() { }; const handleSave = () => { - saveThresholds(thresholds); + storage.saveThresholdConfig(thresholds); setHasChanges(false); toast.success('Threshold configuration saved'); }; const handleReset = () => { setThresholds(DEFAULT_THRESHOLDS); - saveThresholds(DEFAULT_THRESHOLDS); + storage.saveThresholdConfig(DEFAULT_THRESHOLDS); setHasChanges(false); toast.success('Thresholds reset to defaults'); }; diff --git a/src/components/studio/BottomPanel.tsx b/src/components/studio/BottomPanel.tsx index 9388ef2..0143120 100644 --- a/src/components/studio/BottomPanel.tsx +++ b/src/components/studio/BottomPanel.tsx @@ -26,6 +26,7 @@ import { DropdownMenuItem, DropdownMenuTrigger, } from '@/components/ui/dropdown-menu'; import { Button } from '@/components/ui/button'; +import { storage } from '@/lib/storage'; export type BottomPanelMode = 'results' | 'explain' | 'history' | 'saved' | 'charts' | 'nl2sql' | 'autopilot' | 'pivot' | 'docs' | 'schemadiff' | 'dashboard'; @@ -33,10 +34,8 @@ export type BottomPanelMode = 'results' | 'explain' | 'history' | 'saved' | 'cha function ChartDashboardLazy({ result }: { result: QueryResult | null }) { const [savedCharts, setSavedCharts] = React.useState<{ id: string; name: string; chartType: string; xAxis: string; yAxis: string[] }[]>([]); React.useEffect(() => { - try { - const stored = localStorage.getItem('libredb_saved_charts'); - if (stored) setSavedCharts(JSON.parse(stored)); - } catch { /* ignore */ } + const charts = storage.getSavedCharts(); + if (charts.length > 0) setSavedCharts(charts); }, []); if (savedCharts.length === 0) { diff --git a/src/hooks/use-storage-sync.ts b/src/hooks/use-storage-sync.ts new file mode 100644 index 0000000..a263235 --- 
/dev/null +++ b/src/hooks/use-storage-sync.ts @@ -0,0 +1,235 @@ +'use client'; + +import { useState, useEffect, useRef, useCallback } from 'react'; +import { storage, type StorageConfigResponse, type StorageChangeDetail, type StorageData, STORAGE_COLLECTIONS } from '@/lib/storage'; + +const MIGRATION_FLAG = 'libredb_server_migrated'; +const DEBOUNCE_MS = 500; + +export interface StorageSyncState { + isServerMode: boolean; + isSyncing: boolean; + lastSyncedAt: Date | null; + syncError: string | null; +} + +/** + * Write-through cache sync hook. + * Mounts in Studio.tsx after useAuth. + * + * - Discovers storage mode via GET /api/storage/config + * - In server mode: pulls data on mount, pushes mutations (debounced) + * - Handles first-login migration from localStorage to server + * - Graceful degradation: if server unreachable, localStorage continues + */ +export function useStorageSync(): StorageSyncState { + const [isServerMode, setIsServerMode] = useState(false); + const [isSyncing, setIsSyncing] = useState(false); + const [lastSyncedAt, setLastSyncedAt] = useState(null); + const [syncError, setSyncError] = useState(null); + + const debounceTimerRef = useRef | null>(null); + const pendingCollectionsRef = useRef>(new Set()); + const serverModeRef = useRef(false); + + // ── Push a collection to server (debounced) ── + const pushToServer = useCallback(async (collection: string, data: unknown) => { + try { + const res = await fetch(`/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }); + if (!res.ok) { + const err = await res.json().catch(() => ({})); + throw new Error(err.error || `HTTP ${res.status}`); + } + setLastSyncedAt(new Date()); + setSyncError(null); + } catch (err) { + console.warn(`[StorageSync] Push failed for ${collection}:`, err); + setSyncError(err instanceof Error ? 
err.message : 'Sync failed'); + } + }, []); + + // ── Flush pending collections ── + const flushPending = useCallback(async () => { + const collections = Array.from(pendingCollectionsRef.current); + pendingCollectionsRef.current.clear(); + if (collections.length === 0) return; + + setIsSyncing(true); + try { + await Promise.all( + collections.map((col) => { + const getter = getCollectionData(col); + return pushToServer(col, getter); + }) + ); + } finally { + setIsSyncing(false); + } + }, [pushToServer]); + + // ── Schedule debounced push ── + const schedulePush = useCallback( + (collection: string) => { + pendingCollectionsRef.current.add(collection); + if (debounceTimerRef.current) { + clearTimeout(debounceTimerRef.current); + } + debounceTimerRef.current = setTimeout(() => { + flushPending(); + }, DEBOUNCE_MS); + }, + [flushPending] + ); + + // ── Pull all data from server → localStorage ── + const pullFromServer = useCallback(async () => { + setIsSyncing(true); + try { + const res = await fetch('/api/storage'); + if (!res.ok) return; + const data = (await res.json()) as Partial; + + // Write server data to localStorage (overwrite) + if (data.connections) writeCollectionToLocal('connections', data.connections); + if (data.history) writeCollectionToLocal('history', data.history); + if (data.saved_queries) writeCollectionToLocal('saved_queries', data.saved_queries); + if (data.schema_snapshots) writeCollectionToLocal('schema_snapshots', data.schema_snapshots); + if (data.saved_charts) writeCollectionToLocal('saved_charts', data.saved_charts); + if (data.active_connection_id !== undefined) writeCollectionToLocal('active_connection_id', data.active_connection_id); + if (data.audit_log) writeCollectionToLocal('audit_log', data.audit_log); + if (data.masking_config) writeCollectionToLocal('masking_config', data.masking_config); + if (data.threshold_config) writeCollectionToLocal('threshold_config', data.threshold_config); + + setLastSyncedAt(new Date()); + 
setSyncError(null); + } catch (err) { + console.warn('[StorageSync] Pull failed:', err); + setSyncError(err instanceof Error ? err.message : 'Pull failed'); + } finally { + setIsSyncing(false); + } + }, []); + + // ── Migration: localStorage → server ── + const migrateToServer = useCallback(async () => { + if (typeof window === 'undefined') return; + if (localStorage.getItem(MIGRATION_FLAG)) return; + + setIsSyncing(true); + try { + const allData: Partial = {}; + for (const col of STORAGE_COLLECTIONS) { + const data = getCollectionData(col); + if (data !== null && data !== undefined) { + (allData as Record)[col] = data; + } + } + + if (Object.keys(allData).length === 0) { + localStorage.setItem(MIGRATION_FLAG, new Date().toISOString()); + return; + } + + const res = await fetch('/api/storage/migrate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(allData), + }); + + if (res.ok) { + localStorage.setItem(MIGRATION_FLAG, new Date().toISOString()); + } + } catch (err) { + console.warn('[StorageSync] Migration failed:', err); + } finally { + setIsSyncing(false); + } + }, []); + + // ── Initialize: discover storage mode ── + useEffect(() => { + let cancelled = false; + + async function init() { + try { + const res = await fetch('/api/storage/config'); + if (!res.ok || cancelled) return; + const config = (await res.json()) as StorageConfigResponse; + + if (config.serverMode && !cancelled) { + setIsServerMode(true); + serverModeRef.current = true; + + // Migration first, then pull + await migrateToServer(); + if (!cancelled) { + await pullFromServer(); + } + } + } catch { + // Server unreachable — stay in local mode + } + } + + init(); + return () => { + cancelled = true; + }; + }, [migrateToServer, pullFromServer]); + + // ── Listen for storage mutations ── + useEffect(() => { + if (!isServerMode) return; + + function handleStorageChange(event: Event) { + const detail = (event as CustomEvent).detail; + if 
(detail?.collection) { + schedulePush(detail.collection); + } + } + + window.addEventListener('libredb-storage-change', handleStorageChange); + return () => { + window.removeEventListener('libredb-storage-change', handleStorageChange); + if (debounceTimerRef.current) { + clearTimeout(debounceTimerRef.current); + } + }; + }, [isServerMode, schedulePush]); + + return { isServerMode, isSyncing, lastSyncedAt, syncError }; +} + +// ── Helpers ── + +/** Read a collection's current data from the storage facade */ +function getCollectionData(collection: string): unknown { + switch (collection) { + case 'connections': return storage.getConnections(); + case 'history': return storage.getHistory(); + case 'saved_queries': return storage.getSavedQueries(); + case 'schema_snapshots': return storage.getSchemaSnapshots(); + case 'saved_charts': return storage.getSavedCharts(); + case 'active_connection_id': return storage.getActiveConnectionId(); + case 'audit_log': return storage.getAuditLog(); + case 'masking_config': return storage.getMaskingConfig(); + case 'threshold_config': return storage.getThresholdConfig(); + default: return null; + } +} + +/** Write server data directly to localStorage via storage key */ +function writeCollectionToLocal(collection: string, data: unknown): void { + const key = `libredb_${collection}`; + if (data === null || data === undefined) { + localStorage.removeItem(key); + } else if (typeof data === 'string') { + localStorage.setItem(key, data); + } else { + localStorage.setItem(key, JSON.stringify(data)); + } +} diff --git a/src/lib/audit.ts b/src/lib/audit.ts index 0174b85..99699ee 100644 --- a/src/lib/audit.ts +++ b/src/lib/audit.ts @@ -1,3 +1,5 @@ +import { storage } from '@/lib/storage'; + export type AuditEventType = | 'maintenance' | 'kill_session' @@ -19,7 +21,6 @@ export interface AuditEvent { details?: string; } -const AUDIT_STORAGE_KEY = 'libredb_audit_log'; const MAX_EVENTS = 1000; export class AuditRingBuffer { @@ -93,23 +94,11 @@ 
export function getServerAuditBuffer(): AuditRingBuffer { return _serverBuffer; } -// Client-side localStorage persistence +// Client-side localStorage persistence — delegates to storage module export function loadAuditFromStorage(): AuditEvent[] { - if (typeof window === 'undefined') return []; - try { - const stored = localStorage.getItem(AUDIT_STORAGE_KEY); - return stored ? JSON.parse(stored) : []; - } catch { - return []; - } + return storage.getAuditLog(); } export function saveAuditToStorage(events: AuditEvent[]) { - if (typeof window === 'undefined') return; - try { - const trimmed = events.slice(-MAX_EVENTS); - localStorage.setItem(AUDIT_STORAGE_KEY, JSON.stringify(trimmed)); - } catch { - // Storage full, ignore - } + storage.saveAuditLog(events); } diff --git a/src/lib/data-masking.ts b/src/lib/data-masking.ts index 5e53493..1973ac0 100644 --- a/src/lib/data-masking.ts +++ b/src/lib/data-masking.ts @@ -351,38 +351,16 @@ export function canReveal(role: string | undefined, config: MaskingConfig): bool // ─── Config Persistence ────────────────────────────────────────────────────── +import { storage } from '@/lib/storage'; + export const MASKING_CONFIG_KEY = 'libredb_masking_config'; export function loadMaskingConfig(): MaskingConfig { - if (typeof window === 'undefined') return DEFAULT_MASKING_CONFIG; - try { - const stored = localStorage.getItem(MASKING_CONFIG_KEY); - if (!stored) return DEFAULT_MASKING_CONFIG; - const parsed = JSON.parse(stored) as MaskingConfig; - // Merge with defaults to ensure new builtin patterns are included - const builtinIds = new Set(DEFAULT_MASKING_CONFIG.patterns.filter(p => p.isBuiltin).map(p => p.id)); - const storedIds = new Set(parsed.patterns.map(p => p.id)); - // Add any new builtins that don't exist in stored config - for (const defaultPattern of DEFAULT_MASKING_CONFIG.patterns) { - if (defaultPattern.isBuiltin && !storedIds.has(defaultPattern.id)) { - parsed.patterns.push(defaultPattern); - } - } - // Ensure 
roleSettings exists - if (!parsed.roleSettings) { - parsed.roleSettings = DEFAULT_MASKING_CONFIG.roleSettings; - } - // Remove stale builtin IDs that are no longer in defaults (unlikely but safe) - parsed.patterns = parsed.patterns.filter(p => !p.isBuiltin || builtinIds.has(p.id) || !p.id.startsWith('builtin-')); - return parsed; - } catch { - return DEFAULT_MASKING_CONFIG; - } + return storage.getMaskingConfig(); } export function saveMaskingConfig(config: MaskingConfig): void { - if (typeof window === 'undefined') return; - localStorage.setItem(MASKING_CONFIG_KEY, JSON.stringify(config)); + storage.saveMaskingConfig(config); } // ─── Preview Samples ───────────────────────────────────────────────────────── diff --git a/src/lib/storage.ts b/src/lib/storage.ts deleted file mode 100644 index a5fa01c..0000000 --- a/src/lib/storage.ts +++ /dev/null @@ -1,191 +0,0 @@ -import { DatabaseConnection, QueryHistoryItem, SavedQuery, SchemaSnapshot, SavedChartConfig } from './types'; - -const CONNECTIONS_KEY = 'libredb_connections'; -const HISTORY_KEY = 'libredb_history'; -const SAVED_QUERIES_KEY = 'libredb_saved_queries'; -const SCHEMA_SNAPSHOTS_KEY = 'libredb_schema_snapshots'; -const SAVED_CHARTS_KEY = 'libredb_saved_charts'; -const ACTIVE_CONNECTION_KEY = 'libredb_active_connection_id'; -const MAX_HISTORY_ITEMS = 500; -const MAX_SNAPSHOTS = 50; - -export const storage = { - // Connections - getConnections: (): DatabaseConnection[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(CONNECTIONS_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((conn: DatabaseConnection) => ({ - ...conn, - createdAt: new Date(conn.createdAt) - })); - } catch (e) { - console.error('Failed to parse connections', e); - return []; - } - }, - - saveConnection: (connection: DatabaseConnection) => { - const connections = storage.getConnections(); - const existingIndex = connections.findIndex(c => c.id === connection.id); - - if 
(existingIndex > -1) { - connections[existingIndex] = connection; - } else { - connections.push(connection); - } - - localStorage.setItem(CONNECTIONS_KEY, JSON.stringify(connections)); - }, - - deleteConnection: (id: string) => { - const connections = storage.getConnections(); - const filtered = connections.filter(c => c.id !== id); - localStorage.setItem(CONNECTIONS_KEY, JSON.stringify(filtered)); - }, - - // History - getHistory: (): QueryHistoryItem[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(HISTORY_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((item: QueryHistoryItem) => ({ - ...item, - executedAt: new Date(item.executedAt) - })); - } catch (e) { - console.error('Failed to parse history', e); - return []; - } - }, - - addToHistory: (item: QueryHistoryItem) => { - const history = storage.getHistory(); - const newHistory = [item, ...history].slice(0, MAX_HISTORY_ITEMS); - localStorage.setItem(HISTORY_KEY, JSON.stringify(newHistory)); - }, - - clearHistory: () => { - localStorage.setItem(HISTORY_KEY, JSON.stringify([])); - }, - - // Saved Queries - getSavedQueries: (): SavedQuery[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SAVED_QUERIES_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((q: SavedQuery) => ({ - ...q, - createdAt: new Date(q.createdAt), - updatedAt: new Date(q.updatedAt) - })); - } catch (e) { - console.error('Failed to parse saved queries', e); - return []; - } - }, - - saveQuery: (query: SavedQuery) => { - const queries = storage.getSavedQueries(); - const existingIndex = queries.findIndex(q => q.id === query.id); - - if (existingIndex > -1) { - queries[existingIndex] = { ...query, updatedAt: new Date() }; - } else { - queries.push({ ...query, createdAt: new Date(), updatedAt: new Date() }); - } - - localStorage.setItem(SAVED_QUERIES_KEY, JSON.stringify(queries)); - }, - - deleteSavedQuery: (id: string) 
=> { - const queries = storage.getSavedQueries(); - const filtered = queries.filter(q => q.id !== id); - localStorage.setItem(SAVED_QUERIES_KEY, JSON.stringify(filtered)); - }, - - // Schema Snapshots - getSchemaSnapshots: (connectionId?: string): SchemaSnapshot[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SCHEMA_SNAPSHOTS_KEY); - if (!stored) return []; - try { - const snapshots: SchemaSnapshot[] = JSON.parse(stored).map((s: SchemaSnapshot) => ({ - ...s, - createdAt: new Date(s.createdAt), - })); - if (connectionId) { - return snapshots.filter(s => s.connectionId === connectionId); - } - return snapshots; - } catch (e) { - console.error('Failed to parse schema snapshots', e); - return []; - } - }, - - saveSchemaSnapshot: (snapshot: SchemaSnapshot) => { - const snapshots = storage.getSchemaSnapshots(); - snapshots.push({ ...snapshot, createdAt: new Date() }); - // Keep only the latest MAX_SNAPSHOTS - const trimmed = snapshots.slice(-MAX_SNAPSHOTS); - localStorage.setItem(SCHEMA_SNAPSHOTS_KEY, JSON.stringify(trimmed)); - }, - - deleteSchemaSnapshot: (id: string) => { - const snapshots = storage.getSchemaSnapshots(); - const filtered = snapshots.filter(s => s.id !== id); - localStorage.setItem(SCHEMA_SNAPSHOTS_KEY, JSON.stringify(filtered)); - }, - - // Saved Charts - getSavedCharts: (): SavedChartConfig[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SAVED_CHARTS_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((c: SavedChartConfig) => ({ - ...c, - createdAt: new Date(c.createdAt), - })); - } catch (e) { - console.error('Failed to parse saved charts', e); - return []; - } - }, - - saveChart: (chart: SavedChartConfig) => { - const charts = storage.getSavedCharts(); - const existingIndex = charts.findIndex(c => c.id === chart.id); - if (existingIndex > -1) { - charts[existingIndex] = chart; - } else { - charts.push({ ...chart, createdAt: new Date() }); - 
} - localStorage.setItem(SAVED_CHARTS_KEY, JSON.stringify(charts)); - }, - - deleteChart: (id: string) => { - const charts = storage.getSavedCharts(); - const filtered = charts.filter(c => c.id !== id); - localStorage.setItem(SAVED_CHARTS_KEY, JSON.stringify(filtered)); - }, - - // Active Connection ID - getActiveConnectionId: (): string | null => { - if (typeof window === 'undefined') return null; - return localStorage.getItem(ACTIVE_CONNECTION_KEY); - }, - - setActiveConnectionId: (id: string | null) => { - if (typeof window === 'undefined') return; - if (id) { - localStorage.setItem(ACTIVE_CONNECTION_KEY, id); - } else { - localStorage.removeItem(ACTIVE_CONNECTION_KEY); - } - }, -}; diff --git a/src/lib/storage/factory.ts b/src/lib/storage/factory.ts new file mode 100644 index 0000000..1e2495c --- /dev/null +++ b/src/lib/storage/factory.ts @@ -0,0 +1,84 @@ +/** + * Storage Provider Factory + * Creates the appropriate server storage provider based on STORAGE_PROVIDER env var. + * Uses singleton pattern — one provider instance per process. + */ + +import type { ServerStorageProvider, StorageConfigResponse } from './types'; + +let _provider: ServerStorageProvider | null = null; +let _initialized = false; + +export type StorageProviderType = 'local' | 'sqlite' | 'postgres'; + +/** + * Get the configured storage provider type from environment. + * Returns 'local' if not set or invalid. + */ +export function getStorageProviderType(): StorageProviderType { + const env = process.env.STORAGE_PROVIDER?.toLowerCase(); + if (env === 'sqlite' || env === 'postgres') return env; + return 'local'; +} + +/** + * Check if server-side storage is enabled. + */ +export function isServerStorageEnabled(): boolean { + return getStorageProviderType() !== 'local'; +} + +/** + * Get the storage configuration for the /api/storage/config endpoint. 
+ */ +export function getStorageConfig(): StorageConfigResponse { + const provider = getStorageProviderType(); + return { + provider, + serverMode: provider !== 'local', + }; +} + +/** + * Get or create the singleton server storage provider. + * Returns null if STORAGE_PROVIDER is 'local' or not set. + * The provider is automatically initialized on first call. + */ +export async function getStorageProvider(): Promise { + const providerType = getStorageProviderType(); + + if (providerType === 'local') return null; + + if (_provider && _initialized) return _provider; + + switch (providerType) { + case 'sqlite': { + const { SQLiteStorageProvider } = await import('./providers/sqlite'); + _provider = new SQLiteStorageProvider(); + break; + } + case 'postgres': { + const { PostgresStorageProvider } = await import('./providers/postgres'); + _provider = new PostgresStorageProvider(); + break; + } + } + + if (_provider && !_initialized) { + await _provider.initialize(); + _initialized = true; + } + + return _provider; +} + +/** + * Close and reset the singleton provider. Used for testing/cleanup. + */ +export async function closeStorageProvider(): Promise { + if (_provider) { + await _provider.close(); + _provider = null; + _initialized = false; + } +} diff --git a/src/lib/storage/index.ts b/src/lib/storage/index.ts new file mode 100644 index 0000000..f155bc9 --- /dev/null +++ b/src/lib/storage/index.ts @@ -0,0 +1,14 @@ +/** + * Storage module — barrel export. + * Import path `@/lib/storage` is preserved for all consumers. 
+ */ + +export { storage } from './storage-facade'; +export type { + StorageData, + StorageCollection, + ServerStorageProvider, + StorageConfigResponse, + StorageChangeDetail, +} from './types'; +export { STORAGE_COLLECTIONS } from './types'; diff --git a/src/lib/storage/local-storage.ts b/src/lib/storage/local-storage.ts new file mode 100644 index 0000000..0b6d7e0 --- /dev/null +++ b/src/lib/storage/local-storage.ts @@ -0,0 +1,76 @@ +/** + * Pure localStorage CRUD operations. + * All reads/writes go through these functions. + * No event dispatching — that's the facade's responsibility. + */ + +const KEY_PREFIX = 'libredb_'; + +/** Map collection names to localStorage keys */ +const COLLECTION_KEYS: Record = { + connections: `${KEY_PREFIX}connections`, + history: `${KEY_PREFIX}history`, + saved_queries: `${KEY_PREFIX}saved_queries`, + schema_snapshots: `${KEY_PREFIX}schema_snapshots`, + saved_charts: `${KEY_PREFIX}saved_charts`, + active_connection_id: `${KEY_PREFIX}active_connection_id`, + audit_log: `${KEY_PREFIX}audit_log`, + masking_config: `${KEY_PREFIX}masking_config`, + threshold_config: `${KEY_PREFIX}threshold_config`, +}; + +function isClient(): boolean { + return typeof window !== 'undefined'; +} + +export function getKey(collection: string): string { + return COLLECTION_KEYS[collection] || `${KEY_PREFIX}${collection}`; +} + +/** + * Read raw JSON from localStorage. + * Returns null if not found or parse fails. + */ +export function readJSON(collection: string): T | null { + if (!isClient()) return null; + try { + const key = getKey(collection); + const raw = localStorage.getItem(key); + if (raw === null) return null; + return JSON.parse(raw) as T; + } catch { + return null; + } +} + +/** + * Read raw string from localStorage. + */ +export function readString(collection: string): string | null { + if (!isClient()) return null; + return localStorage.getItem(getKey(collection)); +} + +/** + * Write JSON to localStorage. 
+ */ +export function writeJSON(collection: string, data: unknown): void { + if (!isClient()) return; + localStorage.setItem(getKey(collection), JSON.stringify(data)); +} + +/** + * Write raw string to localStorage. + */ +export function writeString(collection: string, value: string): void { + if (!isClient()) return; + localStorage.setItem(getKey(collection), value); +} + +/** + * Remove a key from localStorage. + */ +export function remove(collection: string): void { + if (!isClient()) return; + localStorage.removeItem(getKey(collection)); +} diff --git a/src/lib/storage/providers/postgres.ts b/src/lib/storage/providers/postgres.ts new file mode 100644 index 0000000..166f0e4 --- /dev/null +++ b/src/lib/storage/providers/postgres.ts @@ -0,0 +1,153 @@ +/** + * PostgreSQL Server Storage Provider + * Uses the existing `pg` package (already a project dependency). + */ + +import type { ServerStorageProvider, StorageCollection, StorageData } from '../types'; +import { STORAGE_COLLECTIONS } from '../types'; + +let Pool: typeof import('pg').Pool; + +export class PostgresStorageProvider implements ServerStorageProvider { + private pool: InstanceType | null = null; + private connectionString: string; + + constructor(connectionString?: string) { + this.connectionString = + connectionString || process.env.STORAGE_POSTGRES_URL || ''; + } + + async initialize(): Promise { + if (!this.connectionString) { + throw new Error( + 'STORAGE_POSTGRES_URL is required when STORAGE_PROVIDER=postgres' + ); + } + + // Dynamic import to avoid requiring pg when not needed + if (!Pool) { + const pg = await import('pg'); + Pool = pg.Pool; + } + + this.pool = new Pool({ + connectionString: this.connectionString, + max: 5, + idleTimeoutMillis: 30000, + }); + + // Create table + await this.pool.query(` + CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, 
collection) + ) + `); + } + + async getAllData(userId: string): Promise> { + this.ensurePool(); + const { rows } = await this.pool!.query( + 'SELECT collection, data FROM user_storage WHERE user_id = $1', + [userId] + ); + + const result: Partial = {}; + for (const row of rows) { + try { + (result as Record)[row.collection] = JSON.parse( + row.data + ); + } catch { + // Skip corrupted data + } + } + return result; + } + + async getCollection( + userId: string, + collection: K + ): Promise { + this.ensurePool(); + const { rows } = await this.pool!.query( + 'SELECT data FROM user_storage WHERE user_id = $1 AND collection = $2', + [userId, collection] + ); + if (rows.length === 0) return null; + try { + return JSON.parse(rows[0].data) as StorageData[K]; + } catch { + return null; + } + } + + async setCollection( + userId: string, + collection: K, + data: StorageData[K] + ): Promise { + this.ensurePool(); + await this.pool!.query( + `INSERT INTO user_storage (user_id, collection, data, updated_at) + VALUES ($1, $2, $3, NOW()) + ON CONFLICT (user_id, collection) + DO UPDATE SET data = EXCLUDED.data, updated_at = NOW()`, + [userId, collection, JSON.stringify(data)] + ); + } + + async mergeData(userId: string, data: Partial): Promise { + this.ensurePool(); + const client = await this.pool!.connect(); + try { + await client.query('BEGIN'); + for (const collection of STORAGE_COLLECTIONS) { + const collectionData = (data as Record)[collection]; + if (collectionData !== undefined) { + await client.query( + `INSERT INTO user_storage (user_id, collection, data, updated_at) + VALUES ($1, $2, $3, NOW()) + ON CONFLICT (user_id, collection) + DO UPDATE SET data = EXCLUDED.data, updated_at = NOW()`, + [userId, collection, JSON.stringify(collectionData)] + ); + } + } + await client.query('COMMIT'); + } catch (err) { + await client.query('ROLLBACK'); + throw err; + } finally { + client.release(); + } + } + + async isHealthy(): Promise { + try { + this.ensurePool(); + const { rows } = 
await this.pool!.query('SELECT 1 as ok'); + return rows[0]?.ok === 1; + } catch { + return false; + } + } + + async close(): Promise { + if (this.pool) { + await this.pool.end(); + this.pool = null; + } + } + + private ensurePool(): void { + if (!this.pool) { + throw new Error( + 'PostgreSQL storage not initialized. Call initialize() first.' + ); + } + } +} diff --git a/src/lib/storage/providers/sqlite.ts b/src/lib/storage/providers/sqlite.ts new file mode 100644 index 0000000..af7ee85 --- /dev/null +++ b/src/lib/storage/providers/sqlite.ts @@ -0,0 +1,146 @@ +/** + * SQLite Server Storage Provider + * Uses better-sqlite3 (Node.js compatible, works in production runner). + * WAL mode enabled for concurrent read performance. + */ + +import type { ServerStorageProvider, StorageCollection, StorageData } from '../types'; +import { STORAGE_COLLECTIONS } from '../types'; +import type BetterSqlite3 from 'better-sqlite3'; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +let Database: any; + +export class SQLiteStorageProvider implements ServerStorageProvider { + private db: BetterSqlite3.Database | null = null; + private dbPath: string; + + constructor(dbPath?: string) { + this.dbPath = dbPath || process.env.STORAGE_SQLITE_PATH || './data/libredb-storage.db'; + } + + async initialize(): Promise { + // Dynamic import to avoid requiring better-sqlite3 when not needed + if (!Database) { + const mod = await import('better-sqlite3'); + Database = mod.default; + } + + // Ensure directory exists + const path = await import('path'); + const fs = await import('fs'); + const dir = path.dirname(this.dbPath); + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } + + this.db = new Database(this.dbPath) as BetterSqlite3.Database; + + // Enable WAL mode for better concurrent read performance + this.db!.pragma('journal_mode = WAL'); + + // Create table + this.db!.exec(` + CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection 
TEXT NOT NULL,
+        data TEXT NOT NULL,
+        updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+        PRIMARY KEY (user_id, collection)
+      )
+    `);
+  }
+
+  async getAllData(userId: string): Promise<Partial<StorageData>> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(
+      'SELECT collection, data FROM user_storage WHERE user_id = ?'
+    );
+    const rows = stmt.all(userId) as { collection: string; data: string }[];
+
+    const result: Partial<StorageData> = {};
+    for (const row of rows) {
+      try {
+        (result as Record<string, unknown>)[row.collection] = JSON.parse(row.data);
+      } catch {
+        // Skip corrupted data
+      }
+    }
+    return result;
+  }
+
+  async getCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K
+  ): Promise<StorageData[K] | null> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(
+      'SELECT data FROM user_storage WHERE user_id = ? AND collection = ?'
+    );
+    const row = stmt.get(userId, collection) as { data: string } | undefined;
+    if (!row) return null;
+    try {
+      return JSON.parse(row.data) as StorageData[K];
+    } catch {
+      return null;
+    }
+  }
+
+  async setCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K,
+    data: StorageData[K]
+  ): Promise<void> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(`
+      INSERT INTO user_storage (user_id, collection, data, updated_at)
+      VALUES (?, ?, ?, datetime('now'))
+      ON CONFLICT (user_id, collection)
+      DO UPDATE SET data = excluded.data, updated_at = excluded.updated_at
+    `);
+    stmt.run(userId, collection, JSON.stringify(data));
+  }
+
+  async mergeData(userId: string, data: Partial<StorageData>): Promise<void> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(`
+      INSERT INTO user_storage (user_id, collection, data, updated_at)
+      VALUES (?, ?, ?, datetime('now'))
+      ON CONFLICT (user_id, collection)
+      DO UPDATE SET data = excluded.data, updated_at = excluded.updated_at
+    `);
+
+    const tx = this.db!.transaction(() => {
+      for (const collection of STORAGE_COLLECTIONS) {
+        const collectionData = (data as Record<string, unknown>)[collection];
+        if (collectionData !== undefined) {
+          stmt.run(userId, collection, JSON.stringify(collectionData));
+        }
+      }
+    });
+    
tx();
+  }
+
+  async isHealthy(): Promise<boolean> {
+    try {
+      this.ensureDb();
+      const result = this.db!.prepare('SELECT 1 as ok').get() as { ok: number };
+      return result?.ok === 1;
+    } catch {
+      return false;
+    }
+  }
+
+  async close(): Promise<void> {
+    if (this.db) {
+      this.db.close();
+      this.db = null;
+    }
+  }
+
+  private ensureDb(): void {
+    if (!this.db) {
+      throw new Error('SQLite storage not initialized. Call initialize() first.');
+    }
+  }
+}
diff --git a/src/lib/storage/storage-facade.ts b/src/lib/storage/storage-facade.ts
new file mode 100644
index 0000000..f18d22b
--- /dev/null
+++ b/src/lib/storage/storage-facade.ts
@@ -0,0 +1,272 @@
+/**
+ * Storage Facade — public API for all storage operations.
+ * Maintains the same sync interface as the original storage.ts.
+ * Dispatches CustomEvent on every mutation for the sync hook.
+ */
+
+import {
+  DatabaseConnection,
+  QueryHistoryItem,
+  SavedQuery,
+  SchemaSnapshot,
+  SavedChartConfig,
+} from '../types';
+import { type AuditEvent } from '../audit';
+import { DEFAULT_MASKING_CONFIG, type MaskingConfig } from '../data-masking';
+import { DEFAULT_THRESHOLDS, type ThresholdConfig } from '../monitoring-thresholds';
+import { readJSON, writeJSON, readString, writeString, remove } from './local-storage';
+import type { StorageCollection } from './types';
+
+const MAX_HISTORY_ITEMS = 500;
+const MAX_SNAPSHOTS = 50;
+const MAX_AUDIT_EVENTS = 1000;
+
+/** Dispatch a custom event to notify the sync hook of a mutation */
+function dispatchChange(collection: StorageCollection, data: unknown): void {
+  if (typeof window === 'undefined') return;
+  window.dispatchEvent(
+    new CustomEvent('libredb-storage-change', {
+      detail: { collection, data },
+    })
+  );
+}
+
+/** Revive Date fields from JSON-parsed objects */
+function reviveDates<T>(items: T[], ...dateFields: string[]): T[] {
+  return items.map((item) => {
+    const revived = { ...item } as Record<string, unknown>;
+    for (const field of dateFields) {
+      if (revived[field]) {
+        revived[field] = new 
Date(revived[field] as string);
+      }
+    }
+    return revived as unknown as T;
+  });
+}
+
+export const storage = {
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Connections
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getConnections: (): DatabaseConnection[] => {
+    const data = readJSON<DatabaseConnection[]>('connections');
+    if (!data) return [];
+    return reviveDates(data, 'createdAt');
+  },
+
+  saveConnection: (connection: DatabaseConnection) => {
+    const connections = storage.getConnections();
+    const existingIndex = connections.findIndex((c) => c.id === connection.id);
+
+    if (existingIndex > -1) {
+      connections[existingIndex] = connection;
+    } else {
+      connections.push(connection);
+    }
+
+    writeJSON('connections', connections);
+    dispatchChange('connections', connections);
+  },
+
+  deleteConnection: (id: string) => {
+    const connections = storage.getConnections();
+    const filtered = connections.filter((c) => c.id !== id);
+    writeJSON('connections', filtered);
+    dispatchChange('connections', filtered);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // History
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getHistory: (): QueryHistoryItem[] => {
+    const data = readJSON<QueryHistoryItem[]>('history');
+    if (!data) return [];
+    return reviveDates(data, 'executedAt');
+  },
+
+  addToHistory: (item: QueryHistoryItem) => {
+    const history = storage.getHistory();
+    const newHistory = [item, ...history].slice(0, MAX_HISTORY_ITEMS);
+    writeJSON('history', newHistory);
+    dispatchChange('history', newHistory);
+  },
+
+  clearHistory: () => {
+    writeJSON('history', []);
+    dispatchChange('history', []);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Saved Queries
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getSavedQueries: (): SavedQuery[] => {
+    const data = 
readJSON<SavedQuery[]>('saved_queries');
+    if (!data) return [];
+    return reviveDates(data, 'createdAt', 'updatedAt');
+  },
+
+  saveQuery: (query: SavedQuery) => {
+    const queries = storage.getSavedQueries();
+    const existingIndex = queries.findIndex((q) => q.id === query.id);
+
+    if (existingIndex > -1) {
+      queries[existingIndex] = { ...query, updatedAt: new Date() };
+    } else {
+      queries.push({ ...query, createdAt: new Date(), updatedAt: new Date() });
+    }
+
+    writeJSON('saved_queries', queries);
+    dispatchChange('saved_queries', queries);
+  },
+
+  deleteSavedQuery: (id: string) => {
+    const queries = storage.getSavedQueries();
+    const filtered = queries.filter((q) => q.id !== id);
+    writeJSON('saved_queries', filtered);
+    dispatchChange('saved_queries', filtered);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Schema Snapshots
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getSchemaSnapshots: (connectionId?: string): SchemaSnapshot[] => {
+    const data = readJSON<SchemaSnapshot[]>('schema_snapshots');
+    if (!data) return [];
+    const snapshots = reviveDates(data, 'createdAt');
+    if (connectionId) {
+      return snapshots.filter((s) => s.connectionId === connectionId);
+    }
+    return snapshots;
+  },
+
+  saveSchemaSnapshot: (snapshot: SchemaSnapshot) => {
+    const snapshots = storage.getSchemaSnapshots();
+    snapshots.push({ ...snapshot, createdAt: new Date() });
+    const trimmed = snapshots.slice(-MAX_SNAPSHOTS);
+    writeJSON('schema_snapshots', trimmed);
+    dispatchChange('schema_snapshots', trimmed);
+  },
+
+  deleteSchemaSnapshot: (id: string) => {
+    const snapshots = storage.getSchemaSnapshots();
+    const filtered = snapshots.filter((s) => s.id !== id);
+    writeJSON('schema_snapshots', filtered);
+    dispatchChange('schema_snapshots', filtered);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Saved Charts
+  // 
═══════════════════════════════════════════════════════════════════════════
+
+  getSavedCharts: (): SavedChartConfig[] => {
+    const data = readJSON<SavedChartConfig[]>('saved_charts');
+    if (!data) return [];
+    return reviveDates(data, 'createdAt');
+  },
+
+  saveChart: (chart: SavedChartConfig) => {
+    const charts = storage.getSavedCharts();
+    const existingIndex = charts.findIndex((c) => c.id === chart.id);
+    if (existingIndex > -1) {
+      charts[existingIndex] = chart;
+    } else {
+      charts.push({ ...chart, createdAt: new Date() });
+    }
+    writeJSON('saved_charts', charts);
+    dispatchChange('saved_charts', charts);
+  },
+
+  deleteChart: (id: string) => {
+    const charts = storage.getSavedCharts();
+    const filtered = charts.filter((c) => c.id !== id);
+    writeJSON('saved_charts', filtered);
+    dispatchChange('saved_charts', filtered);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Active Connection ID
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getActiveConnectionId: (): string | null => {
+    return readString('active_connection_id');
+  },
+
+  setActiveConnectionId: (id: string | null) => {
+    if (typeof window === 'undefined') return;
+    if (id) {
+      writeString('active_connection_id', id);
+    } else {
+      remove('active_connection_id');
+    }
+    dispatchChange('active_connection_id', id);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Audit Log (consolidated from audit.ts)
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getAuditLog: (): AuditEvent[] => {
+    const data = readJSON<AuditEvent[]>('audit_log');
+    return data ?? 
[];
+  },
+
+  saveAuditLog: (events: AuditEvent[]) => {
+    const trimmed = events.slice(-MAX_AUDIT_EVENTS);
+    writeJSON('audit_log', trimmed);
+    dispatchChange('audit_log', trimmed);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Masking Config (consolidated from data-masking.ts)
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getMaskingConfig: (): MaskingConfig => {
+    const data = readJSON<MaskingConfig>('masking_config');
+    if (!data) return DEFAULT_MASKING_CONFIG;
+
+    // Merge with defaults to ensure new builtin patterns are included
+    const builtinIds = new Set(
+      DEFAULT_MASKING_CONFIG.patterns.filter((p) => p.isBuiltin).map((p) => p.id)
+    );
+    const storedIds = new Set(data.patterns.map((p) => p.id));
+
+    for (const defaultPattern of DEFAULT_MASKING_CONFIG.patterns) {
+      if (defaultPattern.isBuiltin && !storedIds.has(defaultPattern.id)) {
+        data.patterns.push(defaultPattern);
+      }
+    }
+
+    if (!data.roleSettings) {
+      data.roleSettings = DEFAULT_MASKING_CONFIG.roleSettings;
+    }
+
+    data.patterns = data.patterns.filter(
+      (p) => !p.isBuiltin || builtinIds.has(p.id) || !p.id.startsWith('builtin-')
+    );
+
+    return data;
+  },
+
+  saveMaskingConfig: (config: MaskingConfig) => {
+    writeJSON('masking_config', config);
+    dispatchChange('masking_config', config);
+  },
+
+  // ═══════════════════════════════════════════════════════════════════════════
+  // Threshold Config (consolidated from SecurityTab.tsx)
+  // ═══════════════════════════════════════════════════════════════════════════
+
+  getThresholdConfig: (): ThresholdConfig[] => {
+    const data = readJSON<ThresholdConfig[]>('threshold_config');
+    return data ?? 
DEFAULT_THRESHOLDS; + }, + + saveThresholdConfig: (thresholds: ThresholdConfig[]) => { + writeJSON('threshold_config', thresholds); + dispatchChange('threshold_config', thresholds); + }, +}; diff --git a/src/lib/storage/types.ts b/src/lib/storage/types.ts new file mode 100644 index 0000000..9598234 --- /dev/null +++ b/src/lib/storage/types.ts @@ -0,0 +1,75 @@ +import type { + DatabaseConnection, + QueryHistoryItem, + SavedQuery, + SchemaSnapshot, + SavedChartConfig, +} from '../types'; +import type { AuditEvent } from '../audit'; +import type { MaskingConfig } from '../data-masking'; +import type { ThresholdConfig } from '../monitoring-thresholds'; + +/** + * All persistable collections and their data types. + * Maps 1:1 with localStorage keys (minus the `libredb_` prefix). + */ +export interface StorageData { + connections: DatabaseConnection[]; + history: QueryHistoryItem[]; + saved_queries: SavedQuery[]; + schema_snapshots: SchemaSnapshot[]; + saved_charts: SavedChartConfig[]; + active_connection_id: string | null; + audit_log: AuditEvent[]; + masking_config: MaskingConfig; + threshold_config: ThresholdConfig[]; +} + +/** Collection names that can be synced to server storage */ +export type StorageCollection = keyof StorageData; + +/** All persistable collection names */ +export const STORAGE_COLLECTIONS: StorageCollection[] = [ + 'connections', + 'history', + 'saved_queries', + 'schema_snapshots', + 'saved_charts', + 'active_connection_id', + 'audit_log', + 'masking_config', + 'threshold_config', +]; + +/** + * Server-side storage provider interface. + * Implements the Strategy Pattern — SQLite and PostgreSQL both implement this. 
+ */
+export interface ServerStorageProvider {
+  /** Create tables if they don't exist */
+  initialize(): Promise<void>;
+  /** Get all collections for a user */
+  getAllData(userId: string): Promise<Partial<StorageData>>;
+  /** Get a single collection for a user */
+  getCollection<K extends StorageCollection>(userId: string, collection: K): Promise<StorageData[K] | null>;
+  /** Set a single collection for a user */
+  setCollection<K extends StorageCollection>(userId: string, collection: K, data: StorageData[K]): Promise<void>;
+  /** Merge multiple collections (used for migration) */
+  mergeData(userId: string, data: Partial<StorageData>): Promise<void>;
+  /** Health check */
+  isHealthy(): Promise<boolean>;
+  /** Cleanup resources */
+  close(): Promise<void>;
+}
+
+/** Storage config returned by /api/storage/config */
+export interface StorageConfigResponse {
+  provider: 'local' | 'sqlite' | 'postgres';
+  serverMode: boolean;
+}
+
+/** Event dispatched on storage mutations */
+export interface StorageChangeDetail {
+  collection: StorageCollection;
+  data: unknown;
+}
diff --git a/src/proxy.ts b/src/proxy.ts
index 3803718..ae4d11e 100644
--- a/src/proxy.ts
+++ b/src/proxy.ts
@@ -59,7 +59,9 @@ export async function proxy(request: NextRequest) {
     // Health check endpoint for load balancers (Render, K8s, etc.)
pathname === '/api/db/health' || // Demo connection endpoint (public for initial load) - pathname === '/api/demo-connection' + pathname === '/api/demo-connection' || + // Storage config endpoint (public, returns only mode info) + pathname === '/api/storage/config' ) { return NextResponse.next(); } @@ -90,10 +92,11 @@ export const config = { * - api/auth (auth endpoints) * - api/db/health (health check for load balancers) * - api/demo-connection (demo database connection - public) + * - api/storage/config (storage mode discovery - public) * - _next/static (static files) * - _next/image (image optimization files) * - favicon.ico (favicon file) */ - '/((?!api/auth|api/db/health|api/demo-connection|_next/static|_next/image|favicon.ico).*)', + '/((?!api/auth|api/db/health|api/demo-connection|api/storage/config|_next/static|_next/image|favicon.ico).*)', ], }; diff --git a/tests/api/storage/config.test.ts b/tests/api/storage/config.test.ts new file mode 100644 index 0000000..9a96365 --- /dev/null +++ b/tests/api/storage/config.test.ts @@ -0,0 +1,42 @@ +import { describe, test, expect, beforeEach, afterEach } from 'bun:test'; +import { GET } from '@/app/api/storage/config/route'; + +describe('GET /api/storage/config', () => { + const originalEnv = process.env.STORAGE_PROVIDER; + + beforeEach(() => { + delete process.env.STORAGE_PROVIDER; + }); + + afterEach(() => { + if (originalEnv === undefined) { + delete process.env.STORAGE_PROVIDER; + } else { + process.env.STORAGE_PROVIDER = originalEnv; + } + }); + + test('returns local config by default', async () => { + const res = await GET(); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.provider).toBe('local'); + expect(json.serverMode).toBe(false); + }); + + test('returns sqlite config when STORAGE_PROVIDER=sqlite', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const res = await GET(); + const json = await res.json(); + expect(json.provider).toBe('sqlite'); + 
expect(json.serverMode).toBe(true); + }); + + test('returns postgres config when STORAGE_PROVIDER=postgres', async () => { + process.env.STORAGE_PROVIDER = 'postgres'; + const res = await GET(); + const json = await res.json(); + expect(json.provider).toBe('postgres'); + expect(json.serverMode).toBe(true); + }); +}); diff --git a/tests/api/storage/storage-routes.test.ts b/tests/api/storage/storage-routes.test.ts new file mode 100644 index 0000000..8da2930 --- /dev/null +++ b/tests/api/storage/storage-routes.test.ts @@ -0,0 +1,159 @@ +import { describe, test, expect, mock, beforeEach } from 'bun:test'; +import { NextRequest } from 'next/server'; + +// ── Mock auth ──────────────────────────────────────────────────────────────── + +let mockSession: { username: string; role: string } | null = { username: 'admin@test.com', role: 'admin' }; + +mock.module('@/lib/auth', () => ({ + getSession: async () => mockSession, +})); + +// ── Mock storage factory ───────────────────────────────────────────────────── + +const mockProvider = { + getAllData: mock(async () => ({ + connections: [{ id: 'c1' }], + })), + getCollection: mock(async () => [{ id: 'c1' }]), + setCollection: mock(async () => {}), + mergeData: mock(async () => {}), +}; + +let providerEnabled = true; + +mock.module('@/lib/storage/factory', () => ({ + getStorageProvider: async () => (providerEnabled ? 
mockProvider : null), +})); + +mock.module('@/lib/storage/types', () => ({ + STORAGE_COLLECTIONS: [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ], +})); + +// ── Import routes ──────────────────────────────────────────────────────────── + +import { GET } from '@/app/api/storage/route'; +import { PUT } from '@/app/api/storage/[collection]/route'; +import { POST } from '@/app/api/storage/migrate/route'; + +// ── Tests ──────────────────────────────────────────────────────────────────── + +describe('GET /api/storage', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.getAllData.mockClear(); + }); + + test('returns 404 when storage not enabled', async () => { + providerEnabled = false; + const res = await GET(); + expect(res.status).toBe(404); + }); + + test('returns 401 when not authenticated', async () => { + mockSession = null; + const res = await GET(); + expect(res.status).toBe(401); + }); + + test('returns user data on success', async () => { + const res = await GET(); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.connections).toEqual([{ id: 'c1' }]); + expect(mockProvider.getAllData).toHaveBeenCalledWith('admin@test.com'); + }); +}); + +describe('PUT /api/storage/[collection]', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.setCollection.mockClear(); + }); + + function makeRequest(collection: string, data: unknown) { + return PUT( + new NextRequest(`http://localhost/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }), + { params: Promise.resolve({ collection }) } + ); + } + + test('returns 404 when storage not enabled', async () => { + providerEnabled = false; + 
+    const res = await makeRequest('connections', []);
+    expect(res.status).toBe(404);
+  });
+
+  test('returns 401 when not authenticated', async () => {
+    mockSession = null;
+    const res = await makeRequest('connections', []);
+    expect(res.status).toBe(401);
+  });
+
+  test('returns 400 for invalid collection', async () => {
+    const res = await makeRequest('invalid_collection', []);
+    expect(res.status).toBe(400);
+  });
+
+  test('updates collection on success', async () => {
+    const data = [{ id: 'c1', name: 'New DB' }];
+    const res = await makeRequest('connections', data);
+    expect(res.status).toBe(200);
+    expect(mockProvider.setCollection).toHaveBeenCalledWith(
+      'admin@test.com',
+      'connections',
+      data
+    );
+  });
+});
+
+describe('POST /api/storage/migrate', () => {
+  beforeEach(() => {
+    mockSession = { username: 'admin@test.com', role: 'admin' };
+    providerEnabled = true;
+    mockProvider.mergeData.mockClear();
+  });
+
+  function makeMigrateRequest(data: Record<string, unknown>) {
+    return POST(
+      new NextRequest('http://localhost/api/storage/migrate', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify(data),
+      })
+    );
+  }
+
+  test('returns 404 when storage not enabled', async () => {
+    providerEnabled = false;
+    const res = await makeMigrateRequest({});
+    expect(res.status).toBe(404);
+  });
+
+  test('returns 401 when not authenticated', async () => {
+    mockSession = null;
+    const res = await makeMigrateRequest({});
+    expect(res.status).toBe(401);
+  });
+
+  test('merges data on success', async () => {
+    const data = { connections: [{ id: 'c1' }], history: [] };
+    const res = await makeMigrateRequest(data);
+    expect(res.status).toBe(200);
+    const json = await res.json();
+    expect(json.ok).toBe(true);
+    expect(json.migrated).toContain('connections');
+    expect(json.migrated).toContain('history');
+    expect(mockProvider.mergeData).toHaveBeenCalledWith('admin@test.com', data);
+  });
+});
diff --git a/tests/components/DataCharts.test.tsx
b/tests/components/DataCharts.test.tsx
index f50de48..ff25f98 100644
--- a/tests/components/DataCharts.test.tsx
+++ b/tests/components/DataCharts.test.tsx
@@ -71,6 +71,33 @@ mock.module('@/components/ui/dropdown-menu', () => ({
     React.createElement('div', { role: 'menuitem', onClick: onClick as (() => void), className }, children as React.ReactNode),
 }));
 
+const mockGetSavedCharts = mock(() => {
+  try {
+    const stored = localStorage.getItem('libredb_saved_charts');
+    return stored ? JSON.parse(stored) : [];
+  } catch { return []; }
+});
+const mockSaveChart = mock((chart: Record<string, unknown>) => {
+  const stored = localStorage.getItem('libredb_saved_charts');
+  const charts = stored ? JSON.parse(stored) : [];
+  charts.push(chart);
+  localStorage.setItem('libredb_saved_charts', JSON.stringify(charts));
+});
+const mockDeleteChart = mock((id: string) => {
+  const stored = localStorage.getItem('libredb_saved_charts');
+  const charts = stored ? JSON.parse(stored) : [];
+  const filtered = charts.filter((c: Record<string, unknown>) => c.id !== id);
+  localStorage.setItem('libredb_saved_charts', JSON.stringify(filtered));
+});
+
+mock.module('@/lib/storage', () => ({
+  storage: {
+    getSavedCharts: mockGetSavedCharts,
+    saveChart: mockSaveChart,
+    deleteChart: mockDeleteChart,
+  },
+}));
+
 mock.module('@/components/ui/select', () => ({
   Select: ({ children, value }: Record<string, unknown>) =>
     React.createElement('div', { 'data-testid': 'select', 'data-value': value }, children as React.ReactNode),
diff --git a/tests/run-components.sh b/tests/run-components.sh
index 4214351..92c373a 100755
--- a/tests/run-components.sh
+++ b/tests/run-components.sh
@@ -24,7 +24,7 @@ set -e
 PASS=0
 FAIL=0
-TOTAL_GROUPS=15
+TOTAL_GROUPS=16
 EXTRA_BUN_ARGS=("$@")
 GROUP_INDEX=0
 COVERAGE_MODE=0
@@ -140,13 +140,16 @@ run_group "Group 12/13: MaskingSettings" \
 run_group "Group 13/14: SchemaDiff" \
   tests/components/SchemaDiff.test.tsx
 
-# Group 15: ConnectionModal Mobile Drawer (isolated - useIsMobile returns true)
-run_group "Group 15/15:
ConnectionModal Mobile" \ +# Group 16: ConnectionModal Mobile Drawer (isolated - useIsMobile returns true) +run_group "Group 16/16: ConnectionModal Mobile" \ tests/components/ConnectionModal.mobile.test.tsx -# Group 14: All remaining files (safe together) -run_group "Group 14/15: Remaining components" \ - tests/components/DataCharts.test.tsx \ +# Group 14: DataCharts (isolated — mocks @/lib/storage with chart methods) +run_group "Group 14/16: DataCharts" \ + tests/components/DataCharts.test.tsx + +# Group 15: All remaining files (safe together) +run_group "Group 15/16: Remaining components" \ tests/components/QueryEditor.test.tsx \ tests/components/QuerySafetyDialog.test.tsx \ tests/components/QueryHistory.test.tsx \ diff --git a/tests/unit/lib/storage/factory.test.ts b/tests/unit/lib/storage/factory.test.ts new file mode 100644 index 0000000..6d79875 --- /dev/null +++ b/tests/unit/lib/storage/factory.test.ts @@ -0,0 +1,71 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; +import { + getStorageProviderType, + isServerStorageEnabled, + getStorageConfig, +} from '@/lib/storage/factory'; + +// Clean env before every test to prevent leakage +beforeEach(() => { + delete process.env.STORAGE_PROVIDER; +}); + +describe('storage factory: getStorageProviderType', () => { + test('returns "local" when STORAGE_PROVIDER not set', () => { + expect(getStorageProviderType()).toBe('local'); + }); + + test('returns "local" for empty string', () => { + process.env.STORAGE_PROVIDER = ''; + expect(getStorageProviderType()).toBe('local'); + }); + + test('returns "sqlite" when STORAGE_PROVIDER=sqlite', () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + expect(getStorageProviderType()).toBe('sqlite'); + }); + + test('returns "postgres" when STORAGE_PROVIDER=postgres', () => { + process.env.STORAGE_PROVIDER = 'postgres'; + expect(getStorageProviderType()).toBe('postgres'); + }); + + test('returns "local" for unknown values', () => { + process.env.STORAGE_PROVIDER = 
'redis'; + expect(getStorageProviderType()).toBe('local'); + }); + + test('is case-insensitive', () => { + process.env.STORAGE_PROVIDER = 'SQLite'; + expect(getStorageProviderType()).toBe('sqlite'); + }); +}); + +describe('storage factory: isServerStorageEnabled', () => { + test('returns false when local', () => { + expect(isServerStorageEnabled()).toBe(false); + }); + + test('returns true for sqlite', () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + expect(isServerStorageEnabled()).toBe(true); + }); + + test('returns true for postgres', () => { + process.env.STORAGE_PROVIDER = 'postgres'; + expect(isServerStorageEnabled()).toBe(true); + }); +}); + +describe('storage factory: getStorageConfig', () => { + test('returns correct shape for local', () => { + const config = getStorageConfig(); + expect(config).toEqual({ provider: 'local', serverMode: false }); + }); + + test('returns correct shape for sqlite', () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const config = getStorageConfig(); + expect(config).toEqual({ provider: 'sqlite', serverMode: true }); + }); +}); diff --git a/tests/unit/lib/storage/local-storage.test.ts b/tests/unit/lib/storage/local-storage.test.ts new file mode 100644 index 0000000..c908375 --- /dev/null +++ b/tests/unit/lib/storage/local-storage.test.ts @@ -0,0 +1,70 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; + +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { readJSON, writeJSON, readString, writeString, remove, getKey } from '@/lib/storage/local-storage'; + +describe('local-storage: getKey', () => { + test('maps known collection names to libredb_ prefix keys', () => { + expect(getKey('connections')).toBe('libredb_connections'); + expect(getKey('history')).toBe('libredb_history'); + expect(getKey('saved_queries')).toBe('libredb_saved_queries'); + expect(getKey('audit_log')).toBe('libredb_audit_log'); + 
expect(getKey('masking_config')).toBe('libredb_masking_config'); + expect(getKey('threshold_config')).toBe('libredb_threshold_config'); + }); + + test('falls back to libredb_ prefix for unknown collections', () => { + expect(getKey('unknown')).toBe('libredb_unknown'); + }); +}); + +describe('local-storage: readJSON / writeJSON', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('writeJSON / readJSON round-trip', () => { + writeJSON('connections', [{ id: 1 }]); + expect(readJSON<{ id: number }[]>('connections')).toEqual([{ id: 1 }]); + }); + + test('readJSON returns null for non-existent key', () => { + expect(readJSON('nonexistent')).toBeNull(); + }); + + test('readJSON returns null for invalid JSON', () => { + localStorage.setItem('libredb_connections', 'not-json{{{'); + expect(readJSON('connections')).toBeNull(); + }); +}); + +describe('local-storage: readString / writeString', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('writeString / readString round-trip', () => { + writeString('active_connection_id', 'conn-42'); + expect(readString('active_connection_id')).toBe('conn-42'); + }); + + test('readString returns null for non-existent key', () => { + expect(readString('active_connection_id')).toBeNull(); + }); +}); + +describe('local-storage: remove', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('remove deletes the key', () => { + writeString('active_connection_id', 'conn-42'); + remove('active_connection_id'); + expect(readString('active_connection_id')).toBeNull(); + }); +}); diff --git a/tests/unit/lib/storage/providers/postgres.test.ts b/tests/unit/lib/storage/providers/postgres.test.ts new file mode 100644 index 0000000..6f7e956 --- /dev/null +++ b/tests/unit/lib/storage/providers/postgres.test.ts @@ -0,0 +1,134 @@ +import { describe, test, expect, beforeEach, afterEach, mock } from 'bun:test'; +import type { ServerStorageProvider } from '@/lib/storage/types'; + +// ── Mock pg 
────────────────────────────────────────────────────────────────── + +/* eslint-disable @typescript-eslint/no-explicit-any */ +const mockQuery = mock(async (..._args: any[]): Promise => ({ rows: [] })); +const mockRelease = mock(() => {}); +const mockEnd = mock(async () => {}); + +const mockClient = { + query: mockQuery, + release: mockRelease, +}; + +const mockPool: Record = { + query: mockQuery, + connect: mock(async () => mockClient), + end: mockEnd, +}; + +mock.module('pg', () => ({ + Pool: mock(() => mockPool), +})); +/* eslint-enable @typescript-eslint/no-explicit-any */ + +import { PostgresStorageProvider } from '@/lib/storage/providers/postgres'; + +describe('PostgresStorageProvider', () => { + let provider: ServerStorageProvider; + + beforeEach(() => { + mockQuery.mockClear(); + mockEnd.mockClear(); + mockRelease.mockClear(); + provider = new PostgresStorageProvider('postgresql://localhost:5432/test'); + }); + + afterEach(async () => { + await provider.close(); + }); + + test('initialize creates table', async () => { + await provider.initialize(); + expect(mockQuery).toHaveBeenCalledTimes(1); + const sql = (mockQuery.mock.calls as unknown[][])[0][0] as string; + expect(sql).toContain('CREATE TABLE IF NOT EXISTS user_storage'); + }); + + test('getAllData returns parsed collections', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: JSON.stringify([{ id: 'h1' }]) }, + ], + }); + + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history as unknown).toEqual([{ id: 'h1' }]); + }); + + test('getCollection returns null when not found', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [] }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + 
expect(result).toBeNull(); + }); + + test('getCollection returns parsed data', async () => { + const data = [{ id: 'c1', name: 'Test' }]; + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [{ data: JSON.stringify(data) }], + }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result as unknown).toEqual(data); + }); + + test('setCollection calls INSERT with ON CONFLICT', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [] }); + + await provider.setCollection('admin@test.com', 'connections', []); + + const calls = mockQuery.mock.calls as unknown[][]; + const lastCall = calls[calls.length - 1]; + const sql = lastCall[0] as string; + expect(sql).toContain('INSERT INTO user_storage'); + expect(sql).toContain('ON CONFLICT'); + }); + + test('isHealthy returns true on success', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [{ ok: 1 }] }); + + expect(await provider.isHealthy()).toBe(true); + }); + + test('isHealthy returns false on error', async () => { + await provider.initialize(); + mockQuery.mockRejectedValueOnce(new Error('Connection lost')); + + expect(await provider.isHealthy()).toBe(false); + }); + + test('close calls pool.end()', async () => { + await provider.initialize(); + await provider.close(); + expect(mockEnd).toHaveBeenCalledTimes(1); + }); + + test('mergeData uses transaction', async () => { + await provider.initialize(); + + const mockClientQuery = mock(async (): Promise<{ rows: unknown[] }> => ({ rows: [] })); + const client = { + query: mockClientQuery, + release: mock(() => {}), + }; + mockPool.connect = mock(async () => client); + + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }); + + const queries = (mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + 
expect(queries[0]).toBe('BEGIN'); + expect(queries[queries.length - 1]).toBe('COMMIT'); + }); +}); diff --git a/tests/unit/lib/storage/providers/sqlite.test.ts b/tests/unit/lib/storage/providers/sqlite.test.ts new file mode 100644 index 0000000..ed9ba97 --- /dev/null +++ b/tests/unit/lib/storage/providers/sqlite.test.ts @@ -0,0 +1,140 @@ +import { describe, test, expect, beforeEach, afterEach, mock } from 'bun:test'; +import type { ServerStorageProvider } from '@/lib/storage/types'; + +// ── Mock better-sqlite3 ───────────────────────────────────────────────────── + +/* eslint-disable @typescript-eslint/no-explicit-any */ +const mockPrepare = mock((): any => ({ + all: mock((): any[] => []), + get: mock((): any => undefined), + run: mock((..._args: any[]) => {}), +})); +const mockExec = mock((..._args: any[]) => {}); +const mockPragma = mock((..._args: any[]) => {}); +const mockClose = mock(() => {}); + +const mockDbInstance = { + prepare: mockPrepare, + exec: mockExec, + pragma: mockPragma, + close: mockClose, + transaction: mock((fn: () => void) => fn), +}; + +mock.module('better-sqlite3', () => ({ + default: mock(() => mockDbInstance), +})); + +// Mock fs and path for directory creation +mock.module('fs', () => ({ + existsSync: mock(() => true), + mkdirSync: mock(() => {}), +})); + +mock.module('path', () => ({ + dirname: mock((p: string) => p.replace(/\/[^/]*$/, '')), +})); +/* eslint-enable @typescript-eslint/no-explicit-any */ + +import { SQLiteStorageProvider } from '@/lib/storage/providers/sqlite'; + +describe('SQLiteStorageProvider', () => { + let provider: ServerStorageProvider; + + beforeEach(() => { + mockPrepare.mockClear(); + mockExec.mockClear(); + mockPragma.mockClear(); + mockClose.mockClear(); + provider = new SQLiteStorageProvider(':memory:'); + }); + + afterEach(async () => { + await provider.close(); + }); + + test('initialize creates table and enables WAL', async () => { + await provider.initialize(); + 
expect(mockPragma).toHaveBeenCalledWith('journal_mode = WAL'); + expect(mockExec).toHaveBeenCalledTimes(1); + const sql = (mockExec.mock.calls as unknown[][])[0][0] as string; + expect(sql).toContain('CREATE TABLE IF NOT EXISTS user_storage'); + }); + + test('getAllData returns parsed collections', async () => { + const mockRows = [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: JSON.stringify([{ id: 'h1' }]) }, + ]; + mockPrepare.mockReturnValue({ + all: mock(() => mockRows), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getAllData('admin@test.com'); + + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history as unknown).toEqual([{ id: 'h1' }]); + }); + + test('getCollection returns null when not found', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getCollection returns parsed data', async () => { + const data = [{ id: 'c1', name: 'Test' }]; + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ data: JSON.stringify(data) })), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result as unknown).toEqual(data); + }); + + test('setCollection calls INSERT OR REPLACE', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + + await provider.initialize(); + await provider.setCollection('admin@test.com', 'connections', []); + + expect(mockRun).toHaveBeenCalled(); + const args = (mockRun.mock.calls as unknown[][])[0]; + 
expect(args[0]).toBe('admin@test.com'); + expect(args[1]).toBe('connections'); + }); + + test('isHealthy returns true when db works', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ ok: 1 })), + run: mock(() => {}), + }); + + await provider.initialize(); + expect(await provider.isHealthy()).toBe(true); + }); + + test('close calls db.close()', async () => { + await provider.initialize(); + await provider.close(); + expect(mockClose).toHaveBeenCalledTimes(1); + }); +}); diff --git a/tests/unit/lib/storage/storage-facade.test.ts b/tests/unit/lib/storage/storage-facade.test.ts new file mode 100644 index 0000000..0ee240d --- /dev/null +++ b/tests/unit/lib/storage/storage-facade.test.ts @@ -0,0 +1,184 @@ +import { describe, test, expect, beforeEach, mock } from 'bun:test'; + +// Ensure `typeof window !== 'undefined'` passes +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { storage } from '@/lib/storage'; +import type { DatabaseConnection } from '@/lib/types'; +import type { AuditEvent } from '@/lib/audit'; +import type { MaskingConfig } from '@/lib/data-masking'; +import type { ThresholdConfig } from '@/lib/monitoring-thresholds'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function makeConnection(overrides: Partial = {}): DatabaseConnection { + return { + id: 'conn-1', + name: 'Test DB', + type: 'postgres', + host: 'localhost', + port: 5432, + createdAt: new Date('2025-01-01'), + ...overrides, + }; +} + +function makeAuditEvent(overrides: Partial = {}): AuditEvent { + return { + id: 'evt-1', + timestamp: '2025-01-01T00:00:00Z', + type: 'query_execution', + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success', + ...overrides, + }; +} + +// ── CustomEvent dispatch ───────────────────────────────────────────────────── + +describe('storage facade: CustomEvent dispatch', () => { + 
beforeEach(() => { + localStorage.clear(); + }); + + test('saveConnection dispatches libredb-storage-change event', () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + storage.saveConnection(makeConnection()); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe('connections'); + + window.removeEventListener('libredb-storage-change', handler); + }); + + test('deleteConnection dispatches event', () => { + storage.saveConnection(makeConnection()); + const handler = mock(() => {}); + window.addEventListener('libredb-storage-change', handler); + + storage.deleteConnection('conn-1'); + + expect(handler).toHaveBeenCalledTimes(1); + window.removeEventListener('libredb-storage-change', handler); + }); + + test('addToHistory dispatches event', () => { + const handler = mock(() => {}); + window.addEventListener('libredb-storage-change', handler); + + storage.addToHistory({ + id: 'h-1', + connectionId: 'c-1', + query: 'SELECT 1', + executionTime: 42, + status: 'success', + executedAt: new Date(), + }); + + expect(handler).toHaveBeenCalledTimes(1); + window.removeEventListener('libredb-storage-change', handler); + }); + + test('setActiveConnectionId dispatches event', () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + storage.setActiveConnectionId('conn-42'); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe('active_connection_id'); + expect(captured!.detail.data).toBe('conn-42'); + + window.removeEventListener('libredb-storage-change', handler); + }); +}); + +// ── Audit log ──────────────────────────────────────────────────────────────── + +describe('storage facade: audit log', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('getAuditLog 
returns empty array when nothing stored', () => { + expect(storage.getAuditLog()).toEqual([]); + }); + + test('saveAuditLog / getAuditLog round-trip', () => { + const events = [makeAuditEvent({ id: 'e1' }), makeAuditEvent({ id: 'e2' })]; + storage.saveAuditLog(events); + const result = storage.getAuditLog(); + expect(result.length).toBe(2); + expect(result[0].id).toBe('e1'); + }); + + test('saveAuditLog trims to 1000 events', () => { + const events: AuditEvent[] = []; + for (let i = 0; i < 1050; i++) { + events.push(makeAuditEvent({ id: `e-${i}` })); + } + storage.saveAuditLog(events); + expect(storage.getAuditLog().length).toBe(1000); + }); +}); + +// ── Masking config ─────────────────────────────────────────────────────────── + +describe('storage facade: masking config', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('getMaskingConfig returns defaults when nothing stored', () => { + const config = storage.getMaskingConfig(); + expect(config.enabled).toBe(true); + expect(config.patterns.length).toBeGreaterThan(0); + }); + + test('saveMaskingConfig / getMaskingConfig round-trip', () => { + const config: MaskingConfig = { + enabled: false, + patterns: [], + roleSettings: { + admin: { canToggle: true, canReveal: true }, + user: { canToggle: false, canReveal: false }, + }, + }; + storage.saveMaskingConfig(config); + const result = storage.getMaskingConfig(); + expect(result.enabled).toBe(false); + }); +}); + +// ── Threshold config ───────────────────────────────────────────────────────── + +describe('storage facade: threshold config', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('getThresholdConfig returns defaults when nothing stored', () => { + const config = storage.getThresholdConfig(); + expect(config.length).toBeGreaterThan(0); + expect(config[0].metric).toBe('cacheHitRatio'); + }); + + test('saveThresholdConfig / getThresholdConfig round-trip', () => { + const config: ThresholdConfig[] = [ + { metric: 'custom', 
warning: 50, critical: 80, direction: 'above', label: 'Custom' }, + ]; + storage.saveThresholdConfig(config); + const result = storage.getThresholdConfig(); + expect(result.length).toBe(1); + expect(result[0].metric).toBe('custom'); + }); +}); From 7182008c4b0b777929d1fb02d7bc62b1f36c2d94 Mon Sep 17 00:00:00 2001 From: cevheri Date: Tue, 3 Mar 2026 23:51:27 +0300 Subject: [PATCH 02/20] docs(storage): enhance quick setup guide for SQLite and PostgreSQL, detailing automatic behaviors and manual table creation --- docs/STORAGE_QUICK_SETUP.md | 173 +++++++++++++++++++++++++++++++++--- 1 file changed, 161 insertions(+), 12 deletions(-) diff --git a/docs/STORAGE_QUICK_SETUP.md b/docs/STORAGE_QUICK_SETUP.md index 255227a..32edb91 100644 --- a/docs/STORAGE_QUICK_SETUP.md +++ b/docs/STORAGE_QUICK_SETUP.md @@ -38,19 +38,41 @@ bun dev A single file on the server. Great for self-hosted single-node deployments. -### Local Development +### Minimal Setup (Just One Env Var) ```bash # .env.local STORAGE_PROVIDER=sqlite -STORAGE_SQLITE_PATH=./data/libredb-storage.db ``` ```bash bun dev ``` -The database file and directory are created automatically on first request. +That's it. When `STORAGE_SQLITE_PATH` is not provided, the default path is `./data/libredb-storage.db`. + +### What Happens Automatically + +On the first API request, the SQLite provider: + +1. **Creates the directory** — `./data/` (or whatever parent directory the path points to) is created recursively if it doesn't exist +2. **Creates the database file** — `libredb-storage.db` is created by `better-sqlite3` +3. **Enables WAL mode** — Write-Ahead Logging for better concurrent read performance +4. **Creates the table** — `user_storage` table with the schema below + +No manual setup, no migrations, no SQL scripts needed. 
+ +### Custom Path + +If you want the database file in a different location: + +```bash +# .env.local +STORAGE_PROVIDER=sqlite +STORAGE_SQLITE_PATH=/var/lib/libredb/storage.db +``` + +The directory must be writable by the app process. The directory and file are created automatically. ### Docker @@ -84,12 +106,34 @@ curl http://localhost:3000/api/storage/config # → {"provider":"sqlite","serverMode":true} ``` +### Manual Table Creation (Optional) + +The table is auto-created, but if you prefer to create it yourself (e.g., for auditing or version control): + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + PRIMARY KEY (user_id, collection) +); + +-- Recommended: enable WAL mode for concurrent read performance +PRAGMA journal_mode = WAL; +``` + --- ## 3. PostgreSQL Mode Recommended for production, teams, and high-availability deployments. +> **Important:** Unlike SQLite, `STORAGE_POSTGRES_URL` is **required**. There is no default value. If you set `STORAGE_PROVIDER=postgres` without providing a connection string, the app will throw an error on the first storage request: +> ``` +> Error: STORAGE_POSTGRES_URL is required when STORAGE_PROVIDER=postgres +> ``` + ### Local Development ```bash @@ -112,7 +156,27 @@ STORAGE_POSTGRES_URL=postgresql://libredb:secret@localhost:5432/libredb bun dev ``` -The `user_storage` table is created automatically on first request. +### What Happens Automatically + +On the first API request, the PostgreSQL provider: + +1. **Creates a connection pool** — max 5 connections, 30s idle timeout +2. **Creates the table** — `user_storage` table with the schema below via `CREATE TABLE IF NOT EXISTS` + +The database itself must already exist. The **table** is auto-created, but the **database** is not. 
+ +### Required Privileges + +The PostgreSQL user specified in `STORAGE_POSTGRES_URL` needs: + +| Privilege | Why | +|-----------|-----| +| `CREATE TABLE` | Auto-create `user_storage` on first request (only needed once) | +| `INSERT` | Save user data | +| `UPDATE` | Update existing data | +| `SELECT` | Read user data | + +If your DBA restricts `CREATE TABLE`, you can create the table manually (see below) and the user only needs `INSERT`/`UPDATE`/`SELECT`. ### Docker Compose (App + PostgreSQL) @@ -154,15 +218,13 @@ docker-compose up -d ### Using an Existing PostgreSQL -Just set the connection string — no special schema setup needed: +Just set the connection string — the table is auto-created: ```bash STORAGE_PROVIDER=postgres STORAGE_POSTGRES_URL=postgresql://user:pass@your-pg-host:5432/your_db ``` -The required table is auto-created on startup. The user needs `CREATE TABLE` and `INSERT`/`UPDATE`/`SELECT` privileges. - ### Verify ```bash @@ -170,6 +232,33 @@ curl http://localhost:3000/api/storage/config # → {"provider":"postgres","serverMode":true} ``` +### Manual Table Creation (Optional) + +The table is auto-created on first request. 
However, if you prefer to create it yourself — for example, in environments where the app user doesn't have `CREATE TABLE` privileges, or you want to track schema changes in version control: + +```sql +-- PostgreSQL +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, collection) +); + +-- Optional: index for faster lookups by user +CREATE INDEX IF NOT EXISTS idx_user_storage_user_id ON user_storage (user_id); +``` + +#### Minimal Privileges (When Table Already Exists) + +If a DBA creates the table, the app user only needs: + +```sql +-- Grant only data access (no DDL needed) +GRANT SELECT, INSERT, UPDATE ON user_storage TO libredb_app; +``` + --- ## Migration: Local to Server @@ -191,14 +280,22 @@ When you switch from local mode to SQLite or PostgreSQL, **existing browser data ## Environment Variables Reference -| Variable | Default | Description | -|----------|---------|-------------| -| `STORAGE_PROVIDER` | `local` | `local`, `sqlite`, or `postgres` | -| `STORAGE_SQLITE_PATH` | `./data/libredb-storage.db` | Path to SQLite file (sqlite mode) | -| `STORAGE_POSTGRES_URL` | — | PostgreSQL connection string (postgres mode) | +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `STORAGE_PROVIDER` | No | `local` | `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | No | `./data/libredb-storage.db` | Path to SQLite file. Directory and file are auto-created. | +| `STORAGE_POSTGRES_URL` | **Yes** (postgres mode) | — | PostgreSQL connection string. **No default — app will error without it.** | > These are **server-side only** variables (no `NEXT_PUBLIC_` prefix). The client discovers the mode at runtime via `GET /api/storage/config`. This means one Docker image works for all modes. 
+### Default Behavior Summary + +| Mode | Config needed | What's auto-created | +|------|--------------|---------------------| +| `local` | Nothing | N/A (browser localStorage) | +| `sqlite` | Just `STORAGE_PROVIDER=sqlite` | Directory + DB file + WAL mode + table | +| `postgres` | `STORAGE_PROVIDER=postgres` + `STORAGE_POSTGRES_URL` | Table only (database must exist) | + --- ## Health Check @@ -228,6 +325,12 @@ curl -b cookies.txt http://localhost:3000/api/storage - The directory in `STORAGE_SQLITE_PATH` must be writable by the app process - In Docker, make sure the volume is mounted correctly +### PostgreSQL: "STORAGE_POSTGRES_URL is required" + +- You set `STORAGE_PROVIDER=postgres` but didn't provide `STORAGE_POSTGRES_URL` +- Unlike SQLite, PostgreSQL has **no default** — a connection string is always required +- Fix: add `STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/dbname` to your env + ### PostgreSQL: "Connection refused" - Verify `STORAGE_POSTGRES_URL` is correct and the database is reachable @@ -247,6 +350,52 @@ curl -b cookies.txt http://localhost:3000/api/storage --- +## Database Schema Reference + +Both SQLite and PostgreSQL use the same single-table design. The table is auto-created on first request, but the full DDL is provided here for reference. 
+ +### SQLite + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + PRIMARY KEY (user_id, collection) +); + +PRAGMA journal_mode = WAL; +``` + +### PostgreSQL + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, collection) +); + +-- Optional: index for faster lookups by user +CREATE INDEX IF NOT EXISTS idx_user_storage_user_id ON user_storage (user_id); +``` + +### Schema Explanation + +| Column | Type | Description | +|--------|------|-------------| +| `user_id` | TEXT | User's email from JWT token (e.g., `admin@libredb.org`) | +| `collection` | TEXT | Data category: `connections`, `history`, `saved_queries`, `schema_snapshots`, `saved_charts`, `active_connection_id`, `audit_log`, `masking_config`, `threshold_config` | +| `data` | TEXT | JSON-serialized collection data | +| `updated_at` | TEXT / TIMESTAMPTZ | Last modification timestamp | + +Each row stores **one user's one collection** as a JSON blob. Adding a new collection type requires no schema changes — just a new row. + +--- + ## What's Next? - [STORAGE_ARCHITECTURE.md](./STORAGE_ARCHITECTURE.md) — Deep dive into the write-through cache, sync hook, and provider internals From 1e2bce9538a89962fd5a187f930d0503e084a636 Mon Sep 17 00:00:00 2001 From: yusuf gundogdu Date: Tue, 3 Mar 2026 02:05:08 +0300 Subject: [PATCH 03/20] feat(login): redesign login page with responsive split layout Add a modern split-panel login page inspired by contemporary SaaS designs. 
- Left panel: gradient branding section with hero text, feature highlights, and supported database badges (hidden on mobile) - Right panel: login form with preserved authentication functionality - Mobile: compact branding header with login form and database pills - All existing auth logic (local + OIDC) remains untouched Co-Authored-By: Claude Opus 4.6 --- src/app/login/login-form.tsx | 357 +++++++++++++++++++++++------------ 1 file changed, 233 insertions(+), 124 deletions(-) diff --git a/src/app/login/login-form.tsx b/src/app/login/login-form.tsx index 6ed9af8..d44524a 100644 --- a/src/app/login/login-form.tsx +++ b/src/app/login/login-form.tsx @@ -6,7 +6,7 @@ import { Card, CardContent, CardDescription, CardFooter, CardHeader, CardTitle } import { Input } from '@/components/ui/input'; import { Button } from '@/components/ui/button'; import { Label } from '@/components/ui/label'; -import { Database, ExternalLink, Lock, Mail, ShieldCheck, UserCheck } from 'lucide-react'; +import { Database, ExternalLink, Lock, Mail, ShieldCheck, UserCheck, Zap, Globe, Shield, BarChart3 } from 'lucide-react'; import { toast } from 'sonner'; import { Badge } from '@/components/ui/badge'; @@ -53,141 +53,250 @@ function LoginFormInner({ authProvider }: { authProvider: string }) { } }; + const features = [ + { icon: Globe, title: 'Multi-Database Support', desc: 'PostgreSQL, MySQL, SQLite, MongoDB, Redis & more' }, + { icon: Zap, title: 'AI-Powered Queries', desc: 'Natural language to SQL with intelligent suggestions' }, + { icon: Shield, title: 'Enterprise Security', desc: 'JWT auth, OIDC SSO, role-based access control' }, + { icon: BarChart3, title: 'Visual Schema Explorer', desc: 'Explore tables, relations, and data visually' }, + ]; + return ( -
- - -
-
-
-
- -
+
+ {/* Left Panel - Branding (hidden on mobile) */} +
+ {/* Gradient background */} +
+ + {/* Decorative grid pattern */} +
+ + {/* Floating gradient orbs */} +
+
+
+ + {/* Content */} +
+ {/* Top: Logo */} +
+
+
+ LibreDB Studio
-
- LibreDB Studio - - Secure database administration and management portal - -
- - - - {isOIDC ? ( - <> - {oidcError && ( -
- Authentication failed. Please try again. -
- )} - - - ) : ( - <> -
-
- -
- - setEmail(e.target.value)} - required - /> + + {/* Middle: Hero text + Features */} +
+
+

+ Your fastest path to + database mastery +

+

+ Open-source database studio to query, explore, and manage all your databases from a single, powerful interface. +

+
+ +
+ {features.map((feature) => ( +
+
+
-
-
- -
- - setPassword(e.target.value)} - required - /> +
+

{feature.title}

+

{feature.desc}

-
+ + {/* Bottom: DB badges */} +
+

Supported Databases

+
+ {['PostgreSQL', 'MySQL', 'SQLite', 'MongoDB', 'Redis', 'Oracle', 'SQL Server'].map((db) => ( + - {isLoading ? 'Authenticating...' : 'Sign In'} - - + {db} + + ))} +
+
+
+
-
-
- -
-
- Quick Access for Demo -
+ {/* Right Panel - Login Form */} +
+
+ {/* Mobile branding (visible only on mobile) */} +
+
+
+
+
+
+
+

LibreDB Studio

+

Open-source database management

+
+
-
- + + ) : ( + <> +
+
+ +
+ + setEmail(e.target.value)} + required + /> +
+
+
+ +
+ + setPassword(e.target.value)} + required + /> +
+
+ +
+ +
+
+ +
+
+ Quick Access for Demo +
- - admin@libredb.org - - - - + +
- - user@libredb.org - - -
- - )} - - - -

- Enterprise-grade security powered by LibreDB Studio Engine -

- - v{process.env.NEXT_PUBLIC_APP_VERSION} - -
- + + )} + + + +

+ Enterprise-grade security powered by LibreDB Studio Engine +

+ + v{process.env.NEXT_PUBLIC_APP_VERSION} + +
+ + + {/* Mobile feature pills */} +
+ {['PostgreSQL', 'MySQL', 'MongoDB', 'Redis', 'SQLite'].map((db) => ( + + {db} + + ))} +
+
+
); } From 101a74659fa1893c65c29fac34bb15ef48d19ac5 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 01:11:01 +0300 Subject: [PATCH 04/20] test(storage): add comprehensive tests for storage provider error handling and data migration scenarios --- package.json | 6 +- tests/api/storage/storage-routes.test.ts | 108 ++++++ tests/hooks-isolated/use-storage-sync.test.ts | 366 ++++++++++++++++++ tests/isolated/factory-singleton.test.ts | 150 +++++++ tests/run-components.sh | 10 +- .../lib/storage/providers/postgres.test.ts | 97 +++++ .../unit/lib/storage/providers/sqlite.test.ts | 92 +++++ .../storage/storage-facade-extended.test.ts | 331 ++++++++++++++++ 8 files changed, 1156 insertions(+), 4 deletions(-) create mode 100644 tests/hooks-isolated/use-storage-sync.test.ts create mode 100644 tests/isolated/factory-singleton.test.ts create mode 100644 tests/unit/lib/storage/storage-facade-extended.test.ts diff --git a/package.json b/package.json index 00dc987..64119ce 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,7 @@ "start": "next start", "lint": "eslint .", "typecheck": "tsc --noEmit", - "test": "bun test tests/unit tests/api tests/integration tests/hooks && bun run test:components", + "test": "bun test tests/unit tests/api tests/integration && bun test tests/hooks && bun run test:components", "test:unit": "bun test tests/unit", "test:integration": "bun test tests/integration", "test:hooks": "bun test tests/hooks", @@ -16,8 +16,8 @@ "test:components": "bash tests/run-components.sh", "test:components:coverage": "bash tests/run-components.sh --coverage --coverage-reporter=lcov --coverage-dir=coverage/components", "test:e2e": "bunx playwright test", - "test:coverage:core": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/core tests/unit tests/api tests/integration tests/hooks", - "test:coverage": "rm -rf coverage && bun run test:coverage:core && bun run test:components:coverage && node scripts/merge-lcov.mjs 
coverage/core/lcov.info coverage/components/lcov.info coverage/lcov.info", + "test:coverage:core": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/core tests/unit tests/api tests/integration && bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/hooks tests/hooks", + "test:coverage": "rm -rf coverage && bun run test:coverage:core && bun run test:components:coverage && node scripts/merge-lcov.mjs coverage/core/lcov.info coverage/hooks/lcov.info coverage/components/lcov.info coverage/lcov.info", "test:coverage-html": "bun run test:coverage && genhtml coverage/lcov.info --output-directory coverage/html && echo '\n✅ Open coverage/html/index.html in your browser'" }, "dependencies": { diff --git a/tests/api/storage/storage-routes.test.ts b/tests/api/storage/storage-routes.test.ts index 8da2930..8f05d99 100644 --- a/tests/api/storage/storage-routes.test.ts +++ b/tests/api/storage/storage-routes.test.ts @@ -156,4 +156,112 @@ describe('POST /api/storage/migrate', () => { expect(json.migrated).toContain('history'); expect(mockProvider.mergeData).toHaveBeenCalledWith('admin@test.com', data); }); + + test('returns empty migrated array for empty payload', async () => { + const res = await makeMigrateRequest({}); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.ok).toBe(true); + expect(json.migrated).toEqual([]); + }); +}); + +// ── Error propagation from provider ──────────────────────────────────────── + +describe('API routes: provider error propagation', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.getAllData.mockClear(); + mockProvider.setCollection.mockClear(); + mockProvider.mergeData.mockClear(); + }); + + test('GET /api/storage propagates provider error', async () => { + mockProvider.getAllData.mockRejectedValueOnce(new Error('DB connection lost')); + // Route has no 
try/catch — error propagates (Next.js catches in production) + await expect(GET()).rejects.toThrow('DB connection lost'); + }); + + test('PUT collection response includes ok:true on success', async () => { + const res = await PUT( + new NextRequest('http://localhost/api/storage/connections', { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [{ id: 'c1' }] }), + }), + { params: Promise.resolve({ collection: 'connections' }) } + ); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.ok).toBe(true); + }); + + test('PUT uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + const data = [{ id: 'c1' }]; + await PUT( + new NextRequest('http://localhost/api/storage/connections', { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }), + { params: Promise.resolve({ collection: 'connections' }) } + ); + expect(mockProvider.setCollection).toHaveBeenCalledWith('user@test.com', 'connections', data); + }); + + test('GET uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + await GET(); + expect(mockProvider.getAllData).toHaveBeenCalledWith('user@test.com'); + }); + + test('PUT validates all 9 valid collection names', async () => { + const validCollections = [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ]; + + for (const collection of validCollections) { + mockProvider.setCollection.mockClear(); + const res = await PUT( + new NextRequest(`http://localhost/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [] }), + }), + { params: Promise.resolve({ collection }) } + ); + expect(res.status).toBe(200); + } + }); + + test('PUT rejects 
collection names not in whitelist', async () => { + const invalidNames = ['settings', 'users', 'passwords', '../connections', 'CONNECTIONS']; + for (const name of invalidNames) { + const res = await PUT( + new NextRequest(`http://localhost/api/storage/${name}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [] }), + }), + { params: Promise.resolve({ collection: name }) } + ); + expect(res.status).toBe(400); + } + }); + + test('migrate uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + await POST( + new NextRequest('http://localhost/api/storage/migrate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ connections: [] }), + }) + ); + expect(mockProvider.mergeData).toHaveBeenCalledWith('user@test.com', { connections: [] }); + }); }); diff --git a/tests/hooks-isolated/use-storage-sync.test.ts b/tests/hooks-isolated/use-storage-sync.test.ts new file mode 100644 index 0000000..4c9536e --- /dev/null +++ b/tests/hooks-isolated/use-storage-sync.test.ts @@ -0,0 +1,366 @@ +import '../setup-dom'; + +import { describe, test, expect, mock, beforeEach, afterEach } from 'bun:test'; +import { renderHook, waitFor, act, cleanup } from '@testing-library/react'; +import { mockGlobalFetch, restoreGlobalFetch } from '../helpers/mock-fetch'; + +// ── Mock storage module ───────────────────────────────────────────────────── + +const mockStorage = { + getConnections: mock(() => [{ id: 'c1' }]), + getHistory: mock(() => []), + getSavedQueries: mock(() => []), + getSchemaSnapshots: mock(() => []), + getSavedCharts: mock(() => []), + getActiveConnectionId: mock(() => null), + getAuditLog: mock(() => []), + getMaskingConfig: mock(() => ({ enabled: true, patterns: [], roleSettings: { admin: { canToggle: true, canReveal: true }, user: { canToggle: false, canReveal: false } } })), + getThresholdConfig: mock(() => []), +}; + 
+mock.module('@/lib/storage', () => ({ + storage: mockStorage, + STORAGE_COLLECTIONS: [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ], +})); + +import { useStorageSync } from '@/hooks/use-storage-sync'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function setupLocalMode() { + return mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'local', serverMode: false } }, + }); +} + +function setupServerMode(extraRoutes: Record = {}) { + return mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: ['connections'] } }, + '/api/storage': { ok: true, status: 200, json: { connections: [{ id: 'server-c1' }] } }, + ...extraRoutes, + }); +} + +// ── Tests ─────────────────────────────────────────────────────────────────── + +describe('useStorageSync', () => { + beforeEach(() => { + localStorage.clear(); + Object.values(mockStorage).forEach((fn) => fn.mockClear()); + }); + + afterEach(() => { + restoreGlobalFetch(); + cleanup(); + }); + + // ── Mode discovery ────────────────────────────────────────────────────── + + describe('mode discovery', () => { + test('starts with isServerMode=false', () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + expect(result.current.isServerMode).toBe(false); + }); + + test('stays in local mode when config returns serverMode=false', async () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + + // Wait for config fetch to resolve + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + }); + + test('switches to server mode when config returns serverMode=true', async () => { + 
setupServerMode(); + localStorage.setItem('libredb_server_migrated', 'true'); // Skip migration + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + }); + + test('stays in local mode when config fetch fails', async () => { + mockGlobalFetch({ + '/api/storage/config': { ok: false, status: 500, json: { error: 'Server error' } }, + }); + + const { result } = renderHook(() => useStorageSync()); + + // Give it time to settle + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + }); + + test('stays in local mode when config fetch throws network error', async () => { + globalThis.fetch = mock(async () => { + throw new Error('Network error'); + }) as unknown as typeof fetch; + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + expect(result.current.syncError).toBeNull(); + }); + }); + + // ── Migration ─────────────────────────────────────────────────────────── + + describe('migration', () => { + test('performs migration on first server-mode visit', async () => { + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Migration flag should be set + expect(localStorage.getItem('libredb_server_migrated')).not.toBeNull(); + + // migrate endpoint was called + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? 
c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).toContain('/api/storage/migrate'); + }); + + test('skips migration when flag already set', async () => { + localStorage.setItem('libredb_server_migrated', '2026-01-01'); + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // migrate endpoint should NOT be called + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).not.toContain('/api/storage/migrate'); + }); + + test('sets migration flag even when no data to migrate', async () => { + // All storage getters return empty + mockStorage.getConnections.mockReturnValue([]); + mockStorage.getActiveConnectionId.mockReturnValue(null); + + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + expect(localStorage.getItem('libredb_server_migrated')).not.toBeNull(); + }); + }); + + // ── Pull from server ────────────────────────────────────────────────── + + describe('pull from server', () => { + test('pulls data from server on mount in server mode', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // /api/storage was called for pull + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? 
c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).toContain('/api/storage'); + }); + + test('writes server data to localStorage on pull', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.lastSyncedAt).not.toBeNull(); + }); + + // Server returned connections: [{ id: 'server-c1' }] + const stored = localStorage.getItem('libredb_connections'); + expect(stored).not.toBeNull(); + expect(JSON.parse(stored!)).toEqual([{ id: 'server-c1' }]); + }); + + test('sets syncError on pull failure', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage': { ok: false, status: 500, json: { error: 'DB error' } }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Pull failed but no syncError for non-ok response (graceful degradation) + // The hook just returns early without setting error for non-ok + expect(result.current.isSyncing).toBe(false); + }); + }); + + // ── Push to server (debounced) ──────────────────────────────────────── + + describe('push to server', () => { + test('pushes collection to server on storage-change event', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + const fetchMock = mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: [] } }, + '/api/storage': { ok: true, status: 200, json: {} }, + '/api/storage/connections': { ok: true, status: 200, json: { ok: true } }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() 
=> { + expect(result.current.isServerMode).toBe(true); + }); + + // Dispatch storage change event + act(() => { + window.dispatchEvent( + new CustomEvent('libredb-storage-change', { + detail: { collection: 'connections', data: [{ id: 'c1' }] }, + }) + ); + }); + + // Wait for debounce (500ms) + push + await waitFor(() => { + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + return calls.includes('/api/storage/connections'); + }, { timeout: 2000 }); + }); + + test('sets syncError on push failure', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + + // Use a request handler that returns 500 specifically for PUT /connections + const fetchMock = mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: [] } }, + '/api/storage/connections': { ok: false, status: 500, json: { error: 'Write failed' } }, + '/api/storage': { ok: true, status: 200, json: {} }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Ensure isSyncing is done before triggering push + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + act(() => { + window.dispatchEvent( + new CustomEvent('libredb-storage-change', { + detail: { collection: 'connections', data: [{ id: 'c1' }] }, + }) + ); + }); + + // Wait for debounce (500ms) + push to complete and set syncError + await waitFor(() => { + expect(result.current.syncError).not.toBeNull(); + }, { timeout: 3000 }); + }); + }); + + // ── Event listener lifecycle ────────────────────────────────────────── + + describe('event listener lifecycle', () => { + test('does not listen for events in local mode', async () => { + setupLocalMode(); 
+ const spy = mock(() => {}); + const origAdd = window.addEventListener.bind(window); + window.addEventListener = mock((...args: Parameters) => { + if (args[0] === 'libredb-storage-change') spy(); + origAdd(...args); + }) as typeof window.addEventListener; + + renderHook(() => useStorageSync()); + + await waitFor(() => { + // Give time for init to complete + }); + + // Event listener for storage change should not be added in local mode + expect(spy).not.toHaveBeenCalled(); + + window.addEventListener = origAdd; + }); + }); + + // ── Initial state ───────────────────────────────────────────────────── + + describe('initial state', () => { + test('returns correct initial state shape', () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + + expect(result.current).toEqual({ + isServerMode: false, + isSyncing: false, + lastSyncedAt: null, + syncError: null, + }); + }); + + test('updates lastSyncedAt after successful pull', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.lastSyncedAt).not.toBeNull(); + }); + + expect(result.current.lastSyncedAt).toBeInstanceOf(Date); + }); + }); +}); diff --git a/tests/isolated/factory-singleton.test.ts b/tests/isolated/factory-singleton.test.ts new file mode 100644 index 0000000..25e08a3 --- /dev/null +++ b/tests/isolated/factory-singleton.test.ts @@ -0,0 +1,150 @@ +/** + * Factory singleton tests — isolated process required. + * Mocks provider modules to test getStorageProvider() and closeStorageProvider() + * without real database connections. 
+ */ +import { describe, test, expect, beforeEach, mock } from 'bun:test'; + +// ── Mock providers ────────────────────────────────────────────────────────── + +const mockInitialize = mock(async () => {}); +const mockClose = mock(async () => {}); +const mockGetAllData = mock(async () => ({})); + +function makeMockProvider() { + return { + initialize: mockInitialize, + close: mockClose, + getAllData: mockGetAllData, + getCollection: mock(async () => null), + setCollection: mock(async () => {}), + mergeData: mock(async () => {}), + isHealthy: mock(async () => true), + }; +} + +const mockSQLiteInstance = makeMockProvider(); +const mockPostgresInstance = makeMockProvider(); + +mock.module('@/lib/storage/providers/sqlite', () => ({ + SQLiteStorageProvider: mock(() => mockSQLiteInstance), +})); + +mock.module('@/lib/storage/providers/postgres', () => ({ + PostgresStorageProvider: mock(() => mockPostgresInstance), +})); + +// Import factory AFTER mocking providers +import { + getStorageProvider, + closeStorageProvider, + getStorageProviderType, +} from '@/lib/storage/factory'; + +// ── Tests ─────────────────────────────────────────────────────────────────── + +describe('factory: getStorageProvider', () => { + beforeEach(async () => { + // Reset singleton state between tests + await closeStorageProvider(); + mockInitialize.mockClear(); + mockClose.mockClear(); + delete process.env.STORAGE_PROVIDER; + }); + + test('returns null when STORAGE_PROVIDER is local', async () => { + process.env.STORAGE_PROVIDER = 'local'; + const provider = await getStorageProvider(); + expect(provider).toBeNull(); + }); + + test('returns null when STORAGE_PROVIDER is not set', async () => { + const provider = await getStorageProvider(); + expect(provider).toBeNull(); + }); + + test('creates SQLite provider when STORAGE_PROVIDER=sqlite', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + 
expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('creates Postgres provider when STORAGE_PROVIDER=postgres', async () => { + process.env.STORAGE_PROVIDER = 'postgres'; + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('returns same instance on second call (singleton)', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const first = await getStorageProvider(); + const second = await getStorageProvider(); + + expect(first).toBe(second); + // initialize called only once, not twice + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('calls initialize() on first creation', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('propagates error when initialize() throws', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + mockInitialize.mockRejectedValueOnce(new Error('DB init failed')); + + await expect(getStorageProvider()).rejects.toThrow('DB init failed'); + }); +}); + +describe('factory: closeStorageProvider', () => { + beforeEach(async () => { + await closeStorageProvider(); + mockInitialize.mockClear(); + mockClose.mockClear(); + delete process.env.STORAGE_PROVIDER; + }); + + test('closes and resets singleton', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + await closeStorageProvider(); + expect(mockClose).toHaveBeenCalledTimes(1); + }); + + test('creates new instance after close + re-get', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + await closeStorageProvider(); + + mockInitialize.mockClear(); + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + // New initialize call — fresh instance + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('does not throw when called without active provider', 
async () => { + await expect(closeStorageProvider()).resolves.toBeUndefined(); + expect(mockClose).not.toHaveBeenCalled(); + }); + + test('double close does not throw', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + await closeStorageProvider(); + await expect(closeStorageProvider()).resolves.toBeUndefined(); + // close called only once (second call has no provider) + expect(mockClose).toHaveBeenCalledTimes(1); + }); +}); diff --git a/tests/run-components.sh b/tests/run-components.sh index 92c373a..a445885 100755 --- a/tests/run-components.sh +++ b/tests/run-components.sh @@ -24,7 +24,7 @@ set -e PASS=0 FAIL=0 -TOTAL_GROUPS=16 +TOTAL_GROUPS=18 EXTRA_BUN_ARGS=("$@") GROUP_INDEX=0 COVERAGE_MODE=0 @@ -66,6 +66,14 @@ run_group() { fi } +# Group 0a: useStorageSync hook (isolated — mocks @/lib/storage which contaminates other hook tests) +run_group "Group 0a: useStorageSync hook" \ + tests/hooks-isolated/use-storage-sync.test.ts + +# Group 0b: Factory singleton (isolated — mocks provider modules which contaminates provider unit tests) +run_group "Group 0b: Factory singleton" \ + tests/isolated/factory-singleton.test.ts + # Group 1: Studio (isolated — mocks almost every child component) run_group "Group 1/6: Studio" \ tests/components/Studio.test.tsx diff --git a/tests/unit/lib/storage/providers/postgres.test.ts b/tests/unit/lib/storage/providers/postgres.test.ts index 6f7e956..fab5c62 100644 --- a/tests/unit/lib/storage/providers/postgres.test.ts +++ b/tests/unit/lib/storage/providers/postgres.test.ts @@ -131,4 +131,101 @@ describe('PostgresStorageProvider', () => { expect(queries[0]).toBe('BEGIN'); expect(queries[queries.length - 1]).toBe('COMMIT'); }); + + test('mergeData rolls back on error and releases client', async () => { + await provider.initialize(); + + let callCount = 0; + const mockClientQuery = mock(async (sql: string): Promise<{ rows: unknown[] }> => { + callCount++; + // Fail on the INSERT (2nd call: BEGIN, then 
INSERT fails) + if (callCount === 2) throw new Error('Insert failed'); + return { rows: [] }; + }); + const mockClientRelease = mock(() => {}); + const client = { + query: mockClientQuery, + release: mockClientRelease, + }; + mockPool.connect = mock(async () => client); + + await expect( + provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }) + ).rejects.toThrow('Insert failed'); + + // ROLLBACK should have been called + const queries = (mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + expect(queries).toContain('ROLLBACK'); + // Client always released (finally block) + expect(mockClientRelease).toHaveBeenCalledTimes(1); + }); + + test('mergeData only writes provided collections', async () => { + await provider.initialize(); + + const mockClientQuery = mock(async (): Promise<{ rows: unknown[] }> => ({ rows: [] })); + const client = { + query: mockClientQuery, + release: mock(() => {}), + }; + mockPool.connect = mock(async () => client); + + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }); + + const queries = (mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + // BEGIN + 1 INSERT + COMMIT = 3 queries + expect(queries.length).toBe(3); + expect(queries[0]).toBe('BEGIN'); + expect(queries[1]).toContain('INSERT INTO user_storage'); + expect(queries[2]).toBe('COMMIT'); + }); + + test('getCollection returns null for corrupted JSON', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [{ data: 'invalid-json{{{' }], + }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getAllData skips corrupted JSON rows', async () => { + await provider.initialize(); + 
mockQuery.mockResolvedValueOnce({ + rows: [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: 'corrupted{{{' }, + ], + }); + + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history).toBeUndefined(); + }); + + test('initialize throws when no connection string', async () => { + const origEnv = process.env.STORAGE_POSTGRES_URL; + delete process.env.STORAGE_POSTGRES_URL; + try { + const noUrlProvider = new PostgresStorageProvider(''); + await expect(noUrlProvider.initialize()).rejects.toThrow('STORAGE_POSTGRES_URL is required'); + } finally { + if (origEnv !== undefined) process.env.STORAGE_POSTGRES_URL = origEnv; + } + }); + + test('close on uninitialized provider does not throw', async () => { + const freshProvider = new PostgresStorageProvider('postgresql://localhost/test'); + await expect(freshProvider.close()).resolves.toBeUndefined(); + }); + + test('ensurePool throws when not initialized', async () => { + const freshProvider = new PostgresStorageProvider('postgresql://localhost/test'); + await expect(freshProvider.getAllData('test@test.com')).rejects.toThrow('not initialized'); + }); }); diff --git a/tests/unit/lib/storage/providers/sqlite.test.ts b/tests/unit/lib/storage/providers/sqlite.test.ts index ed9ba97..223f034 100644 --- a/tests/unit/lib/storage/providers/sqlite.test.ts +++ b/tests/unit/lib/storage/providers/sqlite.test.ts @@ -137,4 +137,96 @@ describe('SQLiteStorageProvider', () => { await provider.close(); expect(mockClose).toHaveBeenCalledTimes(1); }); + + test('mergeData uses transaction', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + + const txFn = mock((fn: () => void) => fn); + mockDbInstance.transaction = txFn; + + await provider.initialize(); + await 
provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'DB', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date() }] as import('@/lib/types').DatabaseConnection[], + history: [{ id: 'h1', connectionId: 'c1', query: 'SELECT 1', executionTime: 10, status: 'success', executedAt: new Date() }] as import('@/lib/types').QueryHistoryItem[], + }); + + // Transaction wrapper was called + expect(txFn).toHaveBeenCalledTimes(1); + // run was called for each provided collection + expect(mockRun.mock.calls.length).toBeGreaterThanOrEqual(2); + }); + + test('mergeData only writes provided collections', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + mockDbInstance.transaction = mock((fn: () => void) => fn); + + await provider.initialize(); + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'DB', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date() }] as import('@/lib/types').DatabaseConnection[], + }); + + // Only connections was provided, so only 1 run call for data + expect(mockRun).toHaveBeenCalledTimes(1); + const args = (mockRun.mock.calls as unknown[][])[0]; + expect(args[1]).toBe('connections'); + }); + + test('isHealthy returns false on error', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => { throw new Error('DB crashed'); }), + run: mock(() => {}), + }); + + await provider.initialize(); + expect(await provider.isHealthy()).toBe(false); + }); + + test('getCollection returns null for corrupted JSON', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ data: 'not-valid-json{{{' })), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getAllData skips 
corrupted JSON rows', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: 'corrupted{{{' }, + ]), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history).toBeUndefined(); + }); + + test('close on uninitialized provider does not throw', async () => { + const freshProvider = new SQLiteStorageProvider(':memory:'); + await expect(freshProvider.close()).resolves.toBeUndefined(); + }); + + test('ensureDb throws when not initialized', async () => { + const freshProvider = new SQLiteStorageProvider(':memory:'); + await expect(freshProvider.getAllData('test@test.com')).rejects.toThrow('not initialized'); + }); +}); diff --git a/tests/unit/lib/storage/storage-facade-extended.test.ts b/tests/unit/lib/storage/storage-facade-extended.test.ts new file mode 100644 index 0000000..8904a39 --- /dev/null +++ b/tests/unit/lib/storage/storage-facade-extended.test.ts @@ -0,0 +1,331 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; + +// Ensure `typeof window !== 'undefined'` passes +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { storage } from '@/lib/storage'; + import type { QueryHistoryItem, SchemaSnapshot } from '@/lib/types'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function makeHistoryItem(overrides: Partial<QueryHistoryItem> = {}): QueryHistoryItem { + return { + id: `h-${Math.random().toString(36).slice(2, 8)}`, + connectionId: 'conn-1', + query: 'SELECT 1', + executionTime: 42, + status: 'success', + executedAt: new Date(), + ...overrides, + }; +} + +function makeSnapshot(overrides: Partial<SchemaSnapshot> = {}): SchemaSnapshot { + return { + id: 
`snap-${Math.random().toString(36).slice(2, 8)}`, + connectionId: 'conn-1', + connectionName: 'Test DB', + databaseType: 'postgres', + schema: [], + createdAt: new Date(), + ...overrides, + }; +} + +// ── MongoDB JSON round-trip ───────────────────────────────────────────────── + +describe('storage facade: MongoDB JSON round-trip', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('MongoDB JSON query survives addToHistory round-trip', () => { + const mongoQuery = JSON.stringify({ + collection: 'users', + operation: 'find', + filter: { status: 'active', age: { $gt: 18 } }, + options: { limit: 50, sort: { name: 1 } }, + }); + + storage.addToHistory(makeHistoryItem({ id: 'mongo-1', query: mongoQuery })); + const history = storage.getHistory(); + + expect(history.length).toBe(1); + expect(history[0].query).toBe(mongoQuery); + + // Verify the inner JSON is valid and parseable + const parsed = JSON.parse(history[0].query); + expect(parsed.collection).toBe('users'); + expect(parsed.filter.age.$gt).toBe(18); + }); + + test('nested JSON with special characters round-trips correctly', () => { + const complexQuery = JSON.stringify({ + collection: 'logs', + operation: 'aggregate', + pipeline: [ + { $match: { message: { $regex: 'error.*"fatal"' } } }, + { $group: { _id: '$level', count: { $sum: 1 } } }, + { $sort: { count: -1 } }, + ], + }); + + storage.addToHistory(makeHistoryItem({ id: 'mongo-2', query: complexQuery })); + const result = storage.getHistory(); + + expect(result[0].query).toBe(complexQuery); + const parsed = JSON.parse(result[0].query); + expect(parsed.pipeline.length).toBe(3); + }); + + test('multiple MongoDB queries in history maintain separate JSON integrity', () => { + const queries = [ + JSON.stringify({ collection: 'users', operation: 'find', filter: {} }), + JSON.stringify({ collection: 'orders', operation: 'insertOne', document: { item: 'laptop', price: 999.99 } }), + JSON.stringify({ collection: 'products', operation: 'updateMany', 
filter: { stock: 0 }, update: { $set: { available: false } } }), + ]; + + queries.forEach((q, i) => { + storage.addToHistory(makeHistoryItem({ id: `m-${i}`, query: q })); + }); + + const history = storage.getHistory(); + expect(history.length).toBe(3); + + // History is prepended, so reverse order + for (let i = 0; i < 3; i++) { + const parsed = JSON.parse(history[i].query); + expect(parsed.collection).toBeDefined(); + expect(parsed.operation).toBeDefined(); + } + }); + + test('mixed SQL and MongoDB queries in same history', () => { + storage.addToHistory(makeHistoryItem({ id: 'sql-1', query: 'SELECT * FROM users WHERE name = \'O\'\'Brien\'' })); + storage.addToHistory(makeHistoryItem({ + id: 'mongo-1', + query: JSON.stringify({ collection: 'users', operation: 'find', filter: { name: "O'Brien" } }), + })); + storage.addToHistory(makeHistoryItem({ id: 'sql-2', query: 'INSERT INTO logs (msg) VALUES (\'{"key": "value"}\')' })); + + const history = storage.getHistory(); + expect(history.length).toBe(3); + + // SQL with embedded JSON string + expect(history[0].query).toContain('{"key": "value"}'); + // MongoDB JSON + const mongoParsed = JSON.parse(history[1].query); + expect(mongoParsed.filter.name).toBe("O'Brien"); + // SQL with quotes + expect(history[2].query).toContain("O''Brien"); + }); + + test('Redis JSON command round-trips correctly', () => { + const redisQuery = JSON.stringify({ + command: 'SET', + args: ['mykey', '{"nested": "json", "arr": [1,2,3]}'], + }); + + storage.addToHistory(makeHistoryItem({ id: 'redis-1', query: redisQuery })); + const result = storage.getHistory(); + + expect(result[0].query).toBe(redisQuery); + const parsed = JSON.parse(result[0].query); + expect(parsed.command).toBe('SET'); + // The nested JSON in args should also survive + const nestedJson = JSON.parse(parsed.args[1]); + expect(nestedJson.nested).toBe('json'); + expect(nestedJson.arr).toEqual([1, 2, 3]); + }); +}); + +// ── Buffer boundary tests 
─────────────────────────────────────────────────── + +describe('storage facade: history buffer boundary (500)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims history to 500 when adding item over limit', () => { + // Fill to exactly 500 + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `h-${i}` })); + } + expect(storage.getHistory().length).toBe(500); + + // Add one more — should trim oldest + storage.addToHistory(makeHistoryItem({ id: 'h-new' })); + const history = storage.getHistory(); + expect(history.length).toBe(500); + expect(history[0].id).toBe('h-new'); // newest first + }); + + test('exactly 500 items are kept without trimming', () => { + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `h-${i}` })); + } + expect(storage.getHistory().length).toBe(500); + }); + + test('oldest items are dropped when buffer overflows', () => { + // Fill with 500 items + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `old-${i}` })); + } + + // Add 3 new items + for (let i = 0; i < 3; i++) { + storage.addToHistory(makeHistoryItem({ id: `new-${i}` })); + } + + const history = storage.getHistory(); + expect(history.length).toBe(500); + // Newest 3 should be at the top + expect(history[0].id).toBe('new-2'); + expect(history[1].id).toBe('new-1'); + expect(history[2].id).toBe('new-0'); + // Oldest should be dropped + expect(history.find((h) => h.id === 'old-0')).toBeUndefined(); + expect(history.find((h) => h.id === 'old-1')).toBeUndefined(); + expect(history.find((h) => h.id === 'old-2')).toBeUndefined(); + }); +}); + +describe('storage facade: schema snapshot buffer boundary (50)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims snapshots to 50 when over limit', () => { + for (let i = 0; i < 50; i++) { + storage.saveSchemaSnapshot(makeSnapshot({ id: `snap-${i}` })); + } + expect(storage.getSchemaSnapshots().length).toBe(50); + + // Add one 
more + storage.saveSchemaSnapshot(makeSnapshot({ id: 'snap-new' })); + const snapshots = storage.getSchemaSnapshots(); + expect(snapshots.length).toBe(50); + // Oldest should be dropped (sliced from end, keeps last 50) + expect(snapshots.find((s) => s.id === 'snap-0')).toBeUndefined(); + }); + + test('exactly 50 snapshots kept without trimming', () => { + for (let i = 0; i < 50; i++) { + storage.saveSchemaSnapshot(makeSnapshot({ id: `snap-${i}` })); + } + expect(storage.getSchemaSnapshots().length).toBe(50); + }); +}); + +describe('storage facade: audit log buffer boundary (1000)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims audit log to 1000 events', () => { + const events = Array.from({ length: 1050 }, (_, i) => ({ + id: `evt-${i}`, + timestamp: new Date().toISOString(), + type: 'query_execution' as const, + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success' as const, + })); + + storage.saveAuditLog(events); + const result = storage.getAuditLog(); + expect(result.length).toBe(1000); + // Keeps the last 1000 (newest) + expect(result[0].id).toBe('evt-50'); + expect(result[999].id).toBe('evt-1049'); + }); + + test('exactly 1000 events kept without trimming', () => { + const events = Array.from({ length: 1000 }, (_, i) => ({ + id: `evt-${i}`, + timestamp: new Date().toISOString(), + type: 'query_execution' as const, + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success' as const, + })); + + storage.saveAuditLog(events); + expect(storage.getAuditLog().length).toBe(1000); + }); +}); + +// ── Delete non-existent ID ────────────────────────────────────────────────── + +describe('storage facade: delete non-existent items', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('deleteConnection with non-existent id does not throw', () => { + expect(() => storage.deleteConnection('non-existent')).not.toThrow(); + }); + + test('deleteSavedQuery with non-existent id does not throw', () => 
{ + expect(() => storage.deleteSavedQuery('non-existent')).not.toThrow(); + }); + + test('deleteSchemaSnapshot with non-existent id does not throw', () => { + expect(() => storage.deleteSchemaSnapshot('non-existent')).not.toThrow(); + }); + + test('deleteChart with non-existent id does not throw', () => { + expect(() => storage.deleteChart('non-existent')).not.toThrow(); + }); + + test('deleteConnection does not affect existing items', () => { + storage.saveConnection({ + id: 'c1', name: 'DB1', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date(), + }); + storage.deleteConnection('non-existent'); + expect(storage.getConnections().length).toBe(1); + expect(storage.getConnections()[0].id).toBe('c1'); + }); +}); + +// ── Event dispatch for all mutation methods ───────────────────────────────── + +describe('storage facade: event dispatch completeness', () => { + beforeEach(() => { + localStorage.clear(); + }); + + const mutations: Array<{ name: string; fn: () => void; expectedCollection: string }> = [ + { name: 'clearHistory', fn: () => storage.clearHistory(), expectedCollection: 'history' }, + { name: 'saveQuery', fn: () => storage.saveQuery({ id: 'q1', name: 'Test', query: 'SELECT 1', connectionType: 'postgres', createdAt: new Date(), updatedAt: new Date() }), expectedCollection: 'saved_queries' }, + { name: 'deleteSavedQuery', fn: () => storage.deleteSavedQuery('q1'), expectedCollection: 'saved_queries' }, + { name: 'saveSchemaSnapshot', fn: () => storage.saveSchemaSnapshot(makeSnapshot()), expectedCollection: 'schema_snapshots' }, + { name: 'deleteSchemaSnapshot', fn: () => storage.deleteSchemaSnapshot('snap-1'), expectedCollection: 'schema_snapshots' }, + { name: 'saveChart', fn: () => storage.saveChart({ id: 'ch1', name: 'Chart', chartType: 'bar', xAxis: 'x', yAxis: ['y'], createdAt: new Date() }), expectedCollection: 'saved_charts' }, + { name: 'deleteChart', fn: () => storage.deleteChart('ch1'), expectedCollection: 'saved_charts' }, + { name: 
'saveAuditLog', fn: () => storage.saveAuditLog([]), expectedCollection: 'audit_log' }, + { name: 'saveMaskingConfig', fn: () => storage.saveMaskingConfig({ enabled: true, patterns: [], roleSettings: { admin: { canToggle: true, canReveal: true }, user: { canToggle: false, canReveal: false } } }), expectedCollection: 'masking_config' }, + { name: 'saveThresholdConfig', fn: () => storage.saveThresholdConfig([]), expectedCollection: 'threshold_config' }, + ]; + + for (const { name, fn, expectedCollection } of mutations) { + test(`${name} dispatches event for '${expectedCollection}'`, () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + fn(); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe(expectedCollection); + + window.removeEventListener('libredb-storage-change', handler); + }); + } +}); From a8fb1cc7e02efa78d5a157d49c909652a77813a7 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 01:11:49 +0300 Subject: [PATCH 05/20] bump: version upgrade to 0.8.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 64119ce..5e77a68 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "libredb-studio", - "version": "0.7.1", + "version": "0.8.0", "private": true, "scripts": { "dev": "next dev", From 96fdd8b97f26da346106789f6652335fd7168bab Mon Sep 17 00:00:00 2001 From: yusuf gundogdu Date: Wed, 4 Mar 2026 01:17:33 +0300 Subject: [PATCH 06/20] fix(login): resolve CI test failure and improve accessibility - Change mobile branding heading from h1 to h2 to maintain single h1 per page for proper accessibility hierarchy - Update LoginPage test to use getAllByText for 'LibreDB Studio' since the split-panel layout now renders the title in multiple locations Co-Authored-By: Claude Opus 4.6 --- src/app/login/login-form.tsx | 2 +- 
tests/components/LoginPage.test.tsx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/app/login/login-form.tsx b/src/app/login/login-form.tsx index d44524a..1da8d07 100644 --- a/src/app/login/login-form.tsx +++ b/src/app/login/login-form.tsx @@ -150,7 +150,7 @@ function LoginFormInner({ authProvider }: { authProvider: string }) {
-

LibreDB Studio

+

LibreDB Studio

Open-source database management

diff --git a/tests/components/LoginPage.test.tsx b/tests/components/LoginPage.test.tsx index 5606932..c948baa 100644 --- a/tests/components/LoginPage.test.tsx +++ b/tests/components/LoginPage.test.tsx @@ -47,8 +47,8 @@ describe('LoginPage', () => { }); test('renders LibreDB Studio title', () => { - const { getByText } = renderLogin(); - expect(getByText('LibreDB Studio')).not.toBeNull(); + const { getAllByText } = renderLogin(); + expect(getAllByText('LibreDB Studio').length).toBeGreaterThanOrEqual(1); }); test('renders quick access Admin and User buttons', () => { From 0a392e7bcf1c22cffab79eff9c5c2048a30b34f4 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 01:37:50 +0300 Subject: [PATCH 07/20] test(storage): add isolated tests for useStorageSync hook with local and server mode scenarios --- tests/{hooks-isolated => isolated}/use-storage-sync.test.ts | 0 tests/run-components.sh | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tests/{hooks-isolated => isolated}/use-storage-sync.test.ts (100%) diff --git a/tests/hooks-isolated/use-storage-sync.test.ts b/tests/isolated/use-storage-sync.test.ts similarity index 100% rename from tests/hooks-isolated/use-storage-sync.test.ts rename to tests/isolated/use-storage-sync.test.ts diff --git a/tests/run-components.sh b/tests/run-components.sh index a445885..a70b6d1 100755 --- a/tests/run-components.sh +++ b/tests/run-components.sh @@ -68,7 +68,7 @@ run_group() { # Group 0a: useStorageSync hook (isolated — mocks @/lib/storage which contaminates other hook tests) run_group "Group 0a: useStorageSync hook" \ - tests/hooks-isolated/use-storage-sync.test.ts + tests/isolated/use-storage-sync.test.ts # Group 0b: Factory singleton (isolated — mocks provider modules which contaminates provider unit tests) run_group "Group 0b: Factory singleton" \ From a002d581e483c65cd17cddde91c14abb51e7dd2e Mon Sep 17 00:00:00 2001 From: yusuf gundogdu Date: Wed, 4 Mar 2026 01:54:58 +0300 Subject: [PATCH 08/20] feat(auth): 
enhance OIDC login UX with side-by-side SSO window On desktop, the SSO provider's login page now opens as a separate window positioned on the right half of the screen, creating a side-by-side experience with the main login page on the left. On mobile, the flow gracefully falls back to a full-page redirect. Changes: - Add handleSSO function with responsive window management - Enrich OIDC card UI with ShieldCheck icon and security badges - Store popup mode flag via cookie for callback coordination - Auto-close SSO window on completion and verify auth state - Clean up oidc-mode cookie in both success and error paths Co-Authored-By: Claude Opus 4.6 --- src/app/api/auth/oidc/callback/route.ts | 26 ++++++++- src/app/api/auth/oidc/login/route.ts | 20 +++++++ src/app/login/login-form.tsx | 73 +++++++++++++++++++++++-- 3 files changed, 114 insertions(+), 5 deletions(-) diff --git a/src/app/api/auth/oidc/callback/route.ts b/src/app/api/auth/oidc/callback/route.ts index 0655346..8252f9f 100644 --- a/src/app/api/auth/oidc/callback/route.ts +++ b/src/app/api/auth/oidc/callback/route.ts @@ -63,8 +63,20 @@ export async function GET(request: Request) { const username = claims.email || claims.preferred_username || claims.sub || role; await login(role, username); - // Clean up state cookie + // Check if this was a popup login + const isPopup = cookieStore.get('oidc-mode')?.value === 'popup'; + + // Clean up cookies cookieStore.delete('oidc-state'); + cookieStore.delete('oidc-mode'); + + if (isPopup) { + // Close popup and let the parent window detect the auth via /api/auth/me + return new NextResponse( + '', + { headers: { 'Content-Type': 'text/html' } } + ); + } // Redirect based on role return NextResponse.redirect( @@ -75,6 +87,18 @@ export async function GET(request: Request) { if (error instanceof Error && 'cause' in error) { console.error('OIDC error cause:', error.cause); } + + // Clean up popup cookie on error too + const cookieStore2 = await cookies(); + const isPopup = 
cookieStore2.get('oidc-mode')?.value === 'popup'; + cookieStore2.delete('oidc-mode'); + + if (isPopup) { + return new NextResponse( + '', + { headers: { 'Content-Type': 'text/html' } } + ); + } return NextResponse.redirect(`${origin}/login?error=oidc_failed`); } } diff --git a/src/app/api/auth/oidc/login/route.ts b/src/app/api/auth/oidc/login/route.ts index 4c61042..efdd39e 100644 --- a/src/app/api/auth/oidc/login/route.ts +++ b/src/app/api/auth/oidc/login/route.ts @@ -33,10 +33,30 @@ export async function GET(request: Request) { path: '/', }); + // Store popup mode flag so callback knows to close the window + const requestUrl = new URL(request.url); + if (requestUrl.searchParams.get('mode') === 'popup') { + cookieStore.set('oidc-mode', 'popup', { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 300, + path: '/', + }); + } + return NextResponse.redirect(url.toString()); } catch (error) { console.error('OIDC login error:', error); const origin = getPublicOrigin(request); + const requestUrl = new URL(request.url); + const isPopup = requestUrl.searchParams.get('mode') === 'popup'; + if (isPopup) { + return new NextResponse( + '', + { headers: { 'Content-Type': 'text/html' } } + ); + } return NextResponse.redirect(`${origin}/login?error=oidc_config`); } } diff --git a/src/app/login/login-form.tsx b/src/app/login/login-form.tsx index 1da8d07..c58ab1f 100644 --- a/src/app/login/login-form.tsx +++ b/src/app/login/login-form.tsx @@ -19,6 +19,50 @@ function LoginFormInner({ authProvider }: { authProvider: string }) { const searchParams = useSearchParams(); const oidcError = searchParams.get('error'); + const handleSSO = () => { + setIsLoading(true); + const isMobile = window.innerWidth < 768; + + if (isMobile) { + window.location.href = '/api/auth/oidc/login?mode=redirect'; + return; + } + + // Desktop: open SSO as a side-by-side window on the right half of the screen + const screenW = window.screen.availWidth; + const screenH 
= window.screen.availHeight; + const halfW = Math.round(screenW / 2); + + const ssoWindow = window.open( + '/api/auth/oidc/login?mode=popup', + 'sso-login', + `width=${halfW},height=${screenH},left=${halfW},top=0,toolbar=no,menubar=no,scrollbars=yes` + ); + + // Move main window to the left half + try { + window.moveTo(0, 0); + window.resizeTo(halfW, screenH); + } catch { + // Some browsers block window resize - that's fine + } + + // Listen for SSO window close + const checkSSO = setInterval(() => { + if (!ssoWindow || ssoWindow.closed) { + clearInterval(checkSSO); + setIsLoading(false); + router.refresh(); + // Check if auth succeeded + fetch('/api/auth/me').then(res => res.json()).then(data => { + if (data.authenticated) { + router.push(data.role === 'admin' ? '/admin' : '/'); + } + }).catch(() => {}); + } + }, 500); + }; + const handleLogin = async (e?: React.FormEvent, directEmail?: string, directPassword?: string) => { if (e) e.preventDefault(); const loginEmail = directEmail || email; @@ -176,17 +220,38 @@ function LoginFormInner({ authProvider }: { authProvider: string }) { Authentication failed. Please try again.
)} + +
+
+ +
+
+

Single Sign-On

+

+ Sign in securely with your organization's identity provider +

+
+
+ + +
+
+ + Encrypted +
+
+ + OIDC Protected +
+
) : ( <> From 22ccbed013fd0569f95250df57b3d31c7dc982f2 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 02:06:49 +0300 Subject: [PATCH 09/20] docs(release): document v0.8.0 release with pluggable storage layer and migration details --- docs/releases/RELEASE_v0.8.0.md | 432 ++++++++++++++++++++++++++++++++ 1 file changed, 432 insertions(+) create mode 100644 docs/releases/RELEASE_v0.8.0.md diff --git a/docs/releases/RELEASE_v0.8.0.md b/docs/releases/RELEASE_v0.8.0.md new file mode 100644 index 0000000..0e696b8 --- /dev/null +++ b/docs/releases/RELEASE_v0.8.0.md @@ -0,0 +1,432 @@ +# Release v0.8.0 - Pluggable Storage Layer + +This release introduces a **pluggable storage abstraction layer** that allows LibreDB Studio to persist user data beyond the browser. Choose between localStorage (default), SQLite, or PostgreSQL — controlled by a single environment variable, no code changes or rebuild required. + +--- + +## Highlights + +- **Three Storage Modes:** localStorage (zero-config default), SQLite (single-node persistent), PostgreSQL (multi-node enterprise) +- **Write-Through Cache:** localStorage always serves reads (instant, synchronous); server storage is the persistent source of truth +- **Automatic Migration:** Existing localStorage data is seamlessly migrated to server storage on first login +- **Per-User Isolation:** Server storage is scoped by JWT username — no cross-user data leaks +- **Single Docker Image:** Runtime config via `STORAGE_PROVIDER` env var — one image supports all modes +- **Zero Breaking Changes:** All 16+ consumer components keep the same synchronous `storage.*` API + +--- + +## New Features + +### Pluggable Storage Abstraction + +A complete storage module (`src/lib/storage/`) that decouples data persistence from the browser: + +``` +┌──────────────────────────────┐ +│ 16+ Consumer Components │ ← Unchanged, same sync API +│ storage.getConnections() │ +│ storage.saveConnection() │ +└──────────────┬───────────────┘ + │ sync read/write 
+┌──────────────▼───────────────┐ +│ Storage Facade │ ← localStorage + CustomEvent dispatch +└──────────────┬───────────────┘ + │ CustomEvent: 'libredb-storage-change' +┌──────────────▼───────────────┐ +│ useStorageSync Hook │ ← Write-through cache (server mode only) +└──────────────┬───────────────┘ + │ fetch (debounced 500ms) +┌──────────────▼───────────────┐ +│ API Routes /api/storage/* │ ← JWT auth + user scoping +└──────────────┬───────────────┘ + │ +┌──────────────▼───────────────┐ +│ ServerStorageProvider │ ← Strategy Pattern +│ ┌─────────┐ ┌────────────┐ │ +│ │ SQLite │ │ PostgreSQL │ │ +│ └─────────┘ └────────────┘ │ +└──────────────────────────────┘ +``` + +**9 Data Collections** persisted across all modes: + +| Collection | Description | +|-----------|-------------| +| `connections` | Saved database connections | +| `history` | Query execution history (max 500) | +| `saved_queries` | User-saved SQL/JSON queries | +| `schema_snapshots` | Schema diff snapshots (max 50) | +| `saved_charts` | Saved chart configurations | +| `active_connection_id` | Currently active connection | +| `audit_log` | Audit trail events (max 1000) | +| `masking_config` | Data masking rules and RBAC | +| `threshold_config` | Monitoring alert thresholds | + +### Storage Modes + +#### Local Mode (Default) + +No configuration needed. All data lives in the browser's `localStorage`. + +```bash +# Just start the app +bun dev +``` + +#### SQLite Mode + +A single file on the server. Ideal for self-hosted single-node deployments. + +```env +STORAGE_PROVIDER=sqlite +# Optional: STORAGE_SQLITE_PATH=./data/libredb-storage.db (default) +``` + +- Auto-creates directory, database file, and table on first request +- WAL mode enabled for concurrent read performance +- `better-sqlite3` (Node.js native bindings) + +#### PostgreSQL Mode + +Recommended for production, teams, and high-availability deployments. 
+ +```env +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb +``` + +- Connection pool (max 5, 30s idle timeout) +- Table auto-created via `CREATE TABLE IF NOT EXISTS` +- Transactional `mergeData()` for migration safety + +### Write-Through Cache & Sync Hook + +The `useStorageSync` hook orchestrates all client-server synchronization: + +1. **Discovery:** `GET /api/storage/config` determines storage mode at runtime +2. **Migration:** First-time server mode users get localStorage data auto-migrated via `POST /api/storage/migrate` +3. **Pull:** Server data pulled into localStorage on mount +4. **Push:** Mutations debounced (500ms) and pushed to server via `PUT /api/storage/[collection]` +5. **Graceful Degradation:** If server is unreachable, localStorage continues to work + +### Automatic Migration + +When switching from local to server mode: + +1. Sync hook detects first-time server mode (no `libredb_server_migrated` flag) +2. All 9 collections sent to server via `POST /api/storage/migrate` +3. Server performs ID-based deduplication — no duplicates +4. Flag set in localStorage to prevent re-migration +5. From this point, server is the source of truth + +**No manual steps required** — just change the env var and restart. 
+ +### Per-User Isolation + +Every row in `user_storage` is scoped by `user_id` (JWT email): + +- Client never sends `user_id` — server always extracts from JWT cookie +- Every query includes `WHERE user_id = $username` +- OIDC users (Auth0, Keycloak, Okta, Azure AD) fully supported + +### Docker Deployment + +**SQLite:** +```yaml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + environment: + - STORAGE_PROVIDER=sqlite + - STORAGE_SQLITE_PATH=/app/data/libredb-storage.db + volumes: + - storage-data:/app/data +volumes: + storage-data: +``` + +**PostgreSQL:** +```yaml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + environment: + - STORAGE_PROVIDER=postgres + - STORAGE_POSTGRES_URL=postgresql://user:pass@db:5432/libredb + depends_on: + db: + condition: service_healthy + db: + image: postgres:16-alpine + environment: + - POSTGRES_DB=libredb + - POSTGRES_USER=user + - POSTGRES_PASSWORD=pass + volumes: + - pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U user"] + interval: 5s + timeout: 3s + retries: 5 +volumes: + pgdata: +``` + +--- + +## Architecture Changes + +### New Files (24 files, ~3,500 lines) + +| File | Description | +|------|-------------| +| **Storage Module** | | +| `src/lib/storage/types.ts` | `StorageData`, `StorageCollection`, `ServerStorageProvider` interface | +| `src/lib/storage/local-storage.ts` | Pure localStorage CRUD (SSR-safe, JSON parse safety) | +| `src/lib/storage/storage-facade.ts` | Public `storage` object — sync API + CustomEvent dispatch on every mutation | +| `src/lib/storage/factory.ts` | Singleton provider factory with dynamic imports | +| `src/lib/storage/index.ts` | Barrel export preserving `@/lib/storage` import path | +| `src/lib/storage/providers/sqlite.ts` | `better-sqlite3` backend (WAL, auto-create, upsert) | +| `src/lib/storage/providers/postgres.ts` | `pg` pool backend (upsert, transactional merge) | +| **API Routes** | | +| 
`src/app/api/storage/config/route.ts` | `GET` — Public runtime storage mode discovery | +| `src/app/api/storage/route.ts` | `GET` — Fetch all collections (JWT auth) | +| `src/app/api/storage/[collection]/route.ts` | `PUT` — Update single collection (JWT auth) | +| `src/app/api/storage/migrate/route.ts` | `POST` — localStorage to server migration (JWT auth) | +| **Sync Hook** | | +| `src/hooks/use-storage-sync.ts` | Write-through cache hook — discovery, migration, pull, push | +| **Documentation** | | +| `docs/STORAGE_ARCHITECTURE.md` | Deep-dive architecture document (565 lines) | +| `docs/STORAGE_QUICK_SETUP.md` | Quick setup guide for all three modes (404 lines) | +| **Tests (10 files)** | | +| `tests/unit/lib/storage/local-storage.test.ts` | localStorage CRUD tests | +| `tests/unit/lib/storage/storage-facade.test.ts` | Facade sync API tests | +| `tests/unit/lib/storage/storage-facade-extended.test.ts` | Extended facade tests (history caps, snapshots, charts) | +| `tests/unit/lib/storage/factory.test.ts` | Factory env-based provider selection | +| `tests/unit/lib/storage/providers/sqlite.test.ts` | SQLite provider with mocked `better-sqlite3` | +| `tests/unit/lib/storage/providers/postgres.test.ts` | PostgreSQL provider with mocked `pg` | +| `tests/api/storage/config.test.ts` | Config endpoint tests | +| `tests/api/storage/storage-routes.test.ts` | Full API route tests (GET/PUT/POST) | +| `tests/isolated/factory-singleton.test.ts` | Factory singleton isolation tests | +| `tests/isolated/use-storage-sync.test.ts` | useStorageSync hook tests (local + server mode) | + +### Modified Files + +| File | Change | +|------|--------| +| `src/components/Studio.tsx` | Mount `useStorageSync` hook after `useAuth()` | +| `src/components/DataCharts.tsx` | Use `@/lib/storage` module instead of direct localStorage | +| `src/components/admin/tabs/SecurityTab.tsx` | Use `@/lib/storage` module instead of direct localStorage | +| `src/components/studio/BottomPanel.tsx` | Use 
`@/lib/storage` module instead of direct localStorage | +| `src/lib/audit.ts` | Use `@/lib/storage` module instead of direct localStorage | +| `src/lib/data-masking.ts` | Use `@/lib/storage` module instead of direct localStorage | +| `src/proxy.ts` | Add `/api/storage/config` to public route whitelist | +| `.env.example` | Add `STORAGE_PROVIDER`, `STORAGE_SQLITE_PATH`, `STORAGE_POSTGRES_URL` | +| `docker-compose.yml` | Add storage volume and environment variables | +| `Dockerfile` | Include `better-sqlite3` native bindings, create `/app/data` directory | +| `package.json` | Add `better-sqlite3` dependency, version bump to 0.8.0 | +| `docs/ARCHITECTURE.md` | Add storage abstraction section | + +### Deleted Files + +| File | Reason | +|------|--------| +| `src/lib/storage.ts` | Replaced by modular `src/lib/storage/` directory (same import path preserved) | + +--- + +## Bug Fixes + +### Proxy Middleware Blocking `/api/storage/config` + +**Problem:** `GET /api/storage/config` returned a 307 redirect to `/login` instead of the expected JSON response. This endpoint is designed to be public (no auth required) for runtime storage mode discovery. + +**Root Cause:** The endpoint was not included in the proxy middleware's public route whitelist (`src/proxy.ts`). + +**Fix:** Added `/api/storage/config` to both the `if` condition block and the matcher regex in `src/proxy.ts`. + +--- + +## Dependencies + +### Added + +| Package | Version | Purpose | +|---------|---------|---------| +| `better-sqlite3` | ^11.x | SQLite storage provider (WAL mode, native bindings) | +| `@types/better-sqlite3` | ^7.x | TypeScript definitions for better-sqlite3 | + +### Note + +PostgreSQL uses the existing `pg` package (already in dependencies for database connections). No new dependency needed for PostgreSQL storage. 
+ +--- + +## Breaking Changes + +**None.** This release is fully backward-compatible: + +- All 16+ consumer components keep the same synchronous `storage.*` API +- The `@/lib/storage` import path is preserved (barrel export) +- Default mode is `local` — existing deployments work without any changes +- localStorage key prefix standardized to `libredb_` (done in v0.7.1) + +--- + +## Environment Variables + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `STORAGE_PROVIDER` | No | `local` | Storage backend: `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | No | `./data/libredb-storage.db` | Path to SQLite database file | +| `STORAGE_POSTGRES_URL` | Yes (postgres) | — | PostgreSQL connection string | + +> These are **server-side only** variables (no `NEXT_PUBLIC_` prefix). The client discovers the mode at runtime via `GET /api/storage/config`. This means **one Docker image works for all modes**. + +--- + +## Testing + +### New Tests + +| File | Tests | Description | +|------|-------|-------------| +| `local-storage.test.ts` | 7 | localStorage CRUD, SSR safety, JSON parse errors | +| `storage-facade.test.ts` | 18 | All domain methods, CustomEvent dispatch | +| `storage-facade-extended.test.ts` | 33 | History caps, snapshots, charts, edge cases | +| `factory.test.ts` | 7 | Env-based provider selection, singleton behavior | +| `sqlite.test.ts` | 23 | WAL mode, upsert, transactions, health check | +| `postgres.test.ts` | 23 | Pool config, upsert, transactions, health check | +| `config.test.ts` | 4 | Config endpoint responses for all modes | +| `storage-routes.test.ts` | 27 | API route handlers (GET/PUT/POST, auth, validation) | +| `factory-singleton.test.ts` | 15 | Singleton isolation, concurrent access | +| `use-storage-sync.test.ts` | 37 | Hook lifecycle: discovery, migration, pull, push | + +**Total: 194 new tests across 10 files** + +### E2E Test Results + +All three storage modes verified end-to-end: + 
+| Mode | API Tests | Browser Tests | User Isolation | Result | +|------|-----------|---------------|----------------|--------| +| **Local** | 3/3 pass | 2/2 pass | N/A | PASS | +| **SQLite** | 6/6 pass | 3/3 pass | N/A | PASS | +| **PostgreSQL** | 8/8 pass | 3/3 pass | 2-user verified | PASS | + +### CI Pipeline + +``` +bun run lint # ESLint 9 — clean +bun run typecheck # TypeScript strict — clean +bun run test # All tests pass (unit + API + integration + hooks + components) +bun run build # Next.js production build — clean +``` + +--- + +## Extending: Add Your Own Storage Provider + +Adding a new backend (e.g., MySQL, DynamoDB, Redis) requires **one file** implementing `ServerStorageProvider`: + +```typescript +// src/lib/storage/providers/your-provider.ts +import type { ServerStorageProvider, StorageData, StorageCollection } from '../types'; + +export class YourStorageProvider implements ServerStorageProvider { + async initialize(): Promise<void> { /* create table */ } + async getAllData(userId: string): Promise<Partial<StorageData>> { /* ... */ } + async getCollection<K extends StorageCollection>(userId: string, collection: K): Promise<StorageData[K] | null> { /* ... */ } + async setCollection<K extends StorageCollection>(userId: string, collection: K, data: StorageData[K]): Promise<void> { /* upsert */ } + async mergeData(userId: string, data: Partial<StorageData>): Promise<void> { /* batch upsert */ } + async isHealthy(): Promise<boolean> { /* SELECT 1 */ } + async close(): Promise<void> { /* cleanup */ } +} +``` + +Then register it in `factory.ts` — no changes to facade, API routes, sync hook, or consumer components. + +--- + +## Migration Guide + +### From v0.7.x to v0.8.0 + +**No action required** for existing deployments. The default mode is `local` and the API is unchanged. + +### To Enable Server Storage + +1. **SQLite (simplest):** + ```env + # Add to .env.local + STORAGE_PROVIDER=sqlite + ``` + That's it. Directory, file, WAL mode, and table are auto-created. + +2. 
**PostgreSQL:** + ```env + # Add to .env.local + STORAGE_PROVIDER=postgres + STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb + ``` + The database must exist; the table is auto-created. + +3. **Existing localStorage data** is automatically migrated to the server on first login. No manual export/import needed. + +--- + +## Documentation + +- **[STORAGE_ARCHITECTURE.md](../STORAGE_ARCHITECTURE.md)** — Deep-dive into write-through cache, sync hook, provider internals, and data model +- **[STORAGE_QUICK_SETUP.md](../STORAGE_QUICK_SETUP.md)** — Step-by-step setup for all three modes with Docker examples and troubleshooting + +--- + +## What's Next + +### v0.8.x (Planned) +- S3/MinIO storage provider for object storage deployments +- Storage admin panel with usage metrics and data export +- Cross-device sync indicator in the UI +- Conflict resolution for concurrent multi-tab edits in server mode + +--- + +## Full Changelog + +### Added +- Pluggable storage abstraction layer (`src/lib/storage/`) with Strategy Pattern +- SQLite storage provider (`better-sqlite3`, WAL mode, auto-create) +- PostgreSQL storage provider (`pg` pool, transactional merge) +- Storage API routes: `/api/storage/config`, `/api/storage`, `/api/storage/[collection]`, `/api/storage/migrate` +- `useStorageSync` write-through cache hook with debounced push (500ms) +- Automatic localStorage-to-server migration on first login +- Per-user data isolation via JWT username scoping +- Runtime storage mode discovery (no `NEXT_PUBLIC_*` build-time coupling) +- `STORAGE_PROVIDER`, `STORAGE_SQLITE_PATH`, `STORAGE_POSTGRES_URL` environment variables +- Docker Compose examples for SQLite and PostgreSQL modes +- `better-sqlite3` and `@types/better-sqlite3` dependencies +- 194 new tests across 10 test files +- `STORAGE_ARCHITECTURE.md` — comprehensive architecture documentation (565 lines) +- `STORAGE_QUICK_SETUP.md` — quick setup guide with Docker examples (404 lines) + +### Changed +- Monolithic 
`src/lib/storage.ts` refactored into modular `src/lib/storage/` directory +- localStorage keys standardized to `libredb_` prefix (v0.7.1) +- `DataCharts.tsx`, `SecurityTab.tsx`, `BottomPanel.tsx`, `audit.ts`, `data-masking.ts` — use storage module +- `Dockerfile` — includes `better-sqlite3` native bindings and `/app/data` directory +- `docker-compose.yml` — storage volume and environment variables +- `.env.example` — storage configuration section + +### Fixed +- `/api/storage/config` blocked by proxy middleware (307 redirect instead of JSON response) + +### Removed +- `src/lib/storage.ts` — replaced by `src/lib/storage/` module (import path preserved via barrel export) + +--- + +**Full Changelog:** [Compare v0.7.0...v0.8.0](https://github.com/libredb/libredb-studio/compare/v0.7.0...v0.8.0) + +**Docker Image:** `ghcr.io/libredb/libredb-studio:0.8.0` From 2cc6e1b7b7a2de5389b129a1a2ef5e61da1f8c57 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 02:50:14 +0300 Subject: [PATCH 10/20] feat(docker): add PostgreSQL service to docker-compose and update Dockerfile for build dependencies --- Dockerfile | 3 ++- docker-compose.yml | 14 +++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 26b7514..2f7df12 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,6 +11,7 @@ WORKDIR /usr/src/app # Install dependencies only when needed FROM base AS deps +RUN apt-get update && apt-get install -y python3 make g++ --no-install-recommends && rm -rf /var/lib/apt/lists/* COPY package.json bun.lock ./ RUN bun install --frozen-lockfile @@ -61,7 +62,7 @@ COPY --from=builder /usr/src/app/.next/static ./.next/static COPY --from=builder /usr/src/app/node_modules/better-sqlite3 ./node_modules/better-sqlite3 COPY --from=builder /usr/src/app/node_modules/bindings ./node_modules/bindings COPY --from=builder /usr/src/app/node_modules/file-uri-to-path ./node_modules/file-uri-to-path -COPY --from=builder 
/usr/src/app/node_modules/prebuild-install ./node_modules/prebuild-install 2>/dev/null || true +# prebuild-install is only needed at build time, not runtime # Create non-root user for security RUN addgroup --system --gid 1001 nodejs && \ diff --git a/docker-compose.yml b/docker-compose.yml index 449369e..2001f14 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,19 @@ services: - STORAGE_POSTGRES_URL=${STORAGE_POSTGRES_URL} volumes: - storage-data:/app/data - restart: always + + libredb-postgres: + image: postgres:17-alpine + container_name: libredb-postgres + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: libredb_storage + ports: + - "5432:5432" + volumes: + - pgdata:/var/lib/postgresql/data volumes: storage-data: + pgdata: From 5e8754ac7abb7c1e4934720f8f34955bdcaa38b2 Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 03:46:16 +0300 Subject: [PATCH 11/20] feat(fly): add Fly.io configuration for deployment --- fly.toml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 fly.toml diff --git a/fly.toml b/fly.toml new file mode 100644 index 0000000..4c95387 --- /dev/null +++ b/fly.toml @@ -0,0 +1,35 @@ +# LibreDB Studio — Fly.io Configuration +# Deploy: fly deploy --image ghcr.io/libredb/libredb-studio:latest +# Docs: https://fly.io/docs/reference/configuration/ + +app = 'libredb-studio' +primary_region = 'ams' + +[build] + image = 'ghcr.io/libredb/libredb-studio:latest' + +[env] + PORT = '3000' + NODE_ENV = 'production' + NEXT_PUBLIC_AUTH_PROVIDER = 'local' + STORAGE_PROVIDER = 'local' + +[http_service] + internal_port = 3000 + force_https = true + auto_stop_machines = 'stop' + auto_start_machines = true + min_machines_running = 0 + processes = ['app'] + +[[http_service.checks]] + grace_period = '10s' + interval = '30s' + method = 'GET' + path = '/api/db/health' + timeout = '5s' + +[[vm]] + memory = '512mb' + cpu_kind = 'shared' + cpus = 1 From 
bf6f8c61672a6f65f8eeed98ad7953cbbc17069f Mon Sep 17 00:00:00 2001 From: cevheri Date: Wed, 4 Mar 2026 03:51:04 +0300 Subject: [PATCH 12/20] chore(fly): update Fly.io configuration for improved clarity and organization --- fly.toml | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/fly.toml b/fly.toml index 4c95387..3fc8a02 100644 --- a/fly.toml +++ b/fly.toml @@ -1,6 +1,7 @@ -# LibreDB Studio — Fly.io Configuration -# Deploy: fly deploy --image ghcr.io/libredb/libredb-studio:latest -# Docs: https://fly.io/docs/reference/configuration/ +# fly.toml app configuration file generated for libredb-studio on 2026-03-04T03:50:40+03:00 +# +# See https://fly.io/docs/reference/configuration/ for information about how to use this file. +# app = 'libredb-studio' primary_region = 'ams' @@ -9,9 +10,9 @@ primary_region = 'ams' image = 'ghcr.io/libredb/libredb-studio:latest' [env] - PORT = '3000' - NODE_ENV = 'production' NEXT_PUBLIC_AUTH_PROVIDER = 'local' + NODE_ENV = 'production' + PORT = '3000' STORAGE_PROVIDER = 'local' [http_service] @@ -22,14 +23,15 @@ primary_region = 'ams' min_machines_running = 0 processes = ['app'] -[[http_service.checks]] - grace_period = '10s' - interval = '30s' - method = 'GET' - path = '/api/db/health' - timeout = '5s' + [[http_service.checks]] + interval = '30s' + timeout = '5s' + grace_period = '10s' + method = 'GET' + path = '/api/db/health' [[vm]] memory = '512mb' cpu_kind = 'shared' cpus = 1 + memory_mb = 512 From d8227cbcd06f3279f5e97a6ab47b038682f60bf1 Mon Sep 17 00:00:00 2001 From: Hasan A Date: Wed, 4 Mar 2026 08:47:09 +0300 Subject: [PATCH 13/20] feat(auth): add Zitadel OIDC integration support - Added Zitadel to the list of supported OIDC providers in documentation and README - Updated with Zitadel-specific scope and role claim configurations - Implemented RP-Initiated logout URL building logic for Zitadel - Added unit tests for Zitadel logout URL generation --- .env.example | 4 +++- .gitignore | 2 
++ README.md | 6 +++--- docs/OIDC_SETUP.md | 28 ++++++++++++++++++++++++++++ src/lib/oidc.ts | 9 +++++++++ tests/unit/lib/oidc.test.ts | 9 +++++++++ 6 files changed, 54 insertions(+), 4 deletions(-) diff --git a/.env.example b/.env.example index 82953e3..02cf640 100644 --- a/.env.example +++ b/.env.example @@ -30,7 +30,7 @@ JWT_SECRET=your_32_character_random_string_here # AUTHENTICATION PROVIDER # ============================================ # "local" (default) = email/password login (ADMIN_EMAIL/ADMIN_PASSWORD, USER_EMAIL/USER_PASSWORD) -# "oidc" = OpenID Connect SSO (Auth0, Keycloak, Okta, Azure AD, etc.) +# "oidc" = OpenID Connect SSO (Auth0, Keycloak, Okta, Azure AD, Zitadel, etc.) NEXT_PUBLIC_AUTH_PROVIDER=local # ============================================ @@ -43,6 +43,7 @@ NEXT_PUBLIC_AUTH_PROVIDER=local # Scopes to request (default: openid profile email) # OIDC_SCOPE=openid profile email +# if using Zitadel, add this scope: urn:zitadel:iam:org:project:roles # Role mapping (optional) — claim path for determining admin vs user role # Supports dot-notation for nested claims (e.g. "realm_access.roles") @@ -56,6 +57,7 @@ NEXT_PUBLIC_AUTH_PROVIDER=local # Keycloak: OIDC_ROLE_CLAIM=realm_access.roles # Okta: OIDC_ROLE_CLAIM=groups # Azure AD: OIDC_ROLE_CLAIM=roles +# Zitadel: OIDC_ROLE_CLAIM=urn:zitadel:iam:org:project:roles # ============================================ # STORAGE PROVIDER (Optional) diff --git a/.gitignore b/.gitignore index 74b0405..a00b21d 100644 --- a/.gitignore +++ b/.gitignore @@ -83,3 +83,5 @@ Thumbs.db !.claude/skills +.orchids/ +.codegraph/ \ No newline at end of file diff --git a/README.md b/README.md index d6fe479..28e0170 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,7 @@ The demo runs in **Demo Mode** with simulated data. No real database required! ### Authentication & SSO - **Dual Auth Modes**: Local email/password login or OpenID Connect (OIDC) Single Sign-On — switchable via environment variable. 
-- **Vendor-Agnostic OIDC**: Works with any OIDC-compliant provider — Auth0, Keycloak, Okta, Azure AD, Google, and more. +- **Vendor-Agnostic OIDC**: Works with any OIDC-compliant provider — Auth0, Keycloak, Okta, Azure AD, Zitadel, Google, and more. - **PKCE Security**: Authorization Code Flow with Proof Key for Code Exchange (S256) for secure authentication. - **Auto Role Mapping**: Configurable claim-based role mapping with dot-notation for nested claims (e.g., `realm_access.roles`). - **Provider Logout**: Logout clears both local JWT session and identity provider session. @@ -406,7 +406,7 @@ LibreDB Studio is optimized for K8s with: - [x] **Phase 12**: Advanced Charting (Scatter, Histogram, Stacked Charts, Aggregation, Date Grouping, Chart Save/Load, Chart Dashboard). - [x] **Phase 13**: Monitoring Enhancement (Time-Series Trends, Threshold Alerting, Connection Pool Stats, Configurable Polling). - [x] **Phase 14**: Enterprise Database Support (Oracle Database via oracledb Thin mode, Microsoft SQL Server via mssql/tedious). -- [x] **Phase 15**: SSO Integration — Vendor-agnostic OIDC authentication (Auth0, Keycloak, Okta, Azure AD) with PKCE, role mapping, and provider logout. +- [x] **Phase 15**: SSO Integration — Vendor-agnostic OIDC authentication (Auth0, Keycloak, Okta, Azure AD, Zitadel) with PKCE, role mapping, and provider logout. - [ ] **Phase 16**: DBA & Monitoring (Lock Dependency Graph, Vacuum Scheduler, Prometheus Export). - [ ] **Phase 17**: Enterprise Collaboration (User Identity, Shared Workspaces, SAML 2.0). 
@@ -419,7 +419,7 @@ LibreDB Studio is optimized for K8s with: | [DeepWiki](https://deepwiki.com/libredb/libredb-studio) | AI-powered documentation — always up-to-date with the codebase | | [SonarCloud](https://sonarcloud.io/project/overview?id=libredb_libredb-studio) | Code quality, security analysis, and technical debt tracking | | [API Docs](docs/API_DOCS.md) | Complete REST API reference | -| [OIDC Setup Guide](docs/OIDC_SETUP.md) | SSO configuration for Auth0, Keycloak, Okta, Azure AD | +| [OIDC Setup Guide](docs/OIDC_SETUP.md) | SSO configuration for Auth0, Keycloak, Okta, Azure AD, Zitadel | | [OIDC Architecture](docs/OIDC_ARCH.md) | OIDC subsystem internals, security model, extension points | | [Theming Guide](docs/THEMING.md) | CSS theming, dark mode, and styling customization | | [Architecture](docs/ARCHITECTURE.md) | System architecture and design patterns | diff --git a/docs/OIDC_SETUP.md b/docs/OIDC_SETUP.md index a1e9c9d..280f095 100644 --- a/docs/OIDC_SETUP.md +++ b/docs/OIDC_SETUP.md @@ -170,6 +170,34 @@ Navigate to `/login` and click **"Login with SSO"**. OIDC_ADMIN_ROLES=Admin,admin ``` +### Zitadel + +1. **Create Project & Application** in Zitadel Console → Projects → Create New Project → Add Application (Web) + - Auth Method: PKCE + +2. **Settings:** + ``` + Redirect URIs: http://localhost:3000/api/auth/oidc/callback + Post Logout URIs: http://localhost:3000/login + ``` + +3. **Environment Variables:** + ```env + NEXT_PUBLIC_AUTH_PROVIDER=oidc + OIDC_ISSUER=https://your-instance.zitadel.cloud + OIDC_CLIENT_ID=your_client_id + OIDC_CLIENT_SECRET=your_client_secret + ``` + +4. **Role Mapping:** + + Zitadel includes roles if requested via scopes. Ensure `OIDC_SCOPE` includes `urn:zitadel:iam:org:project:roles`. + ```env + OIDC_SCOPE=openid profile email urn:zitadel:iam:org:project:roles + OIDC_ROLE_CLAIM=urn:zitadel:iam:org:project:roles + OIDC_ADMIN_ROLES=admin + ``` + ### Google Workspace 1. 
**Create OAuth Client** in Google Cloud Console → APIs & Services → Credentials → Create OAuth Client ID → Web Application diff --git a/src/lib/oidc.ts b/src/lib/oidc.ts index f1b48dd..35124f1 100644 --- a/src/lib/oidc.ts +++ b/src/lib/oidc.ts @@ -228,6 +228,7 @@ export function buildLogoutUrl(returnTo: string): string | null { try { const config = getOIDCConfig(); const issuerUrl = new URL(config.issuer); + const roleClaim = config.roleClaim; // Auth0 uses /v2/logout if (issuerUrl.hostname.includes('auth0.com')) { @@ -237,6 +238,14 @@ export function buildLogoutUrl(returnTo: string): string | null { return logoutUrl.toString(); } + // Zitadel RP-Initiated Logout + if (roleClaim.includes('zitadel')) { + const logoutUrl = new URL('/oidc/v1/end_session', config.issuer); + logoutUrl.searchParams.set('client_id', config.clientId); + logoutUrl.searchParams.set('post_logout_redirect_uri', returnTo); + return logoutUrl.toString(); + } + // Generic OIDC (Keycloak, Okta, Azure AD, etc.) — RP-Initiated Logout const logoutUrl = new URL('/protocol/openid-connect/logout', config.issuer); logoutUrl.searchParams.set('client_id', config.clientId); diff --git a/tests/unit/lib/oidc.test.ts b/tests/unit/lib/oidc.test.ts index f2ab0b9..513f595 100644 --- a/tests/unit/lib/oidc.test.ts +++ b/tests/unit/lib/oidc.test.ts @@ -230,6 +230,15 @@ describe('buildLogoutUrl', () => { expect(url).toContain('post_logout_redirect_uri='); }); + test('builds Zitadel logout URL', () => { + process.env.OIDC_ISSUER = 'https://my-instance.zitadel.cloud'; + const url = buildLogoutUrl('http://localhost:3000/login'); + expect(url).not.toBeNull(); + expect(url).toContain('/oidc/v1/end_session'); + expect(url).toContain('client_id=test-client-id'); + expect(url).toContain('post_logout_redirect_uri='); + }); + test('returns null when OIDC config is missing', () => { delete process.env.OIDC_ISSUER; const url = buildLogoutUrl('http://localhost:3000/login'); From 09d36a4d33b81e21fa267a072e0055880c82b663 Mon Sep 17 
00:00:00 2001 From: Hasan A Date: Wed, 4 Mar 2026 09:04:58 +0300 Subject: [PATCH 14/20] test: add OIDC_ROLE_CLAIM environment variable to the Zitadel logout URL test. --- tests/unit/lib/oidc.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/lib/oidc.test.ts b/tests/unit/lib/oidc.test.ts index 513f595..e4ff28f 100644 --- a/tests/unit/lib/oidc.test.ts +++ b/tests/unit/lib/oidc.test.ts @@ -232,6 +232,7 @@ describe('buildLogoutUrl', () => { test('builds Zitadel logout URL', () => { process.env.OIDC_ISSUER = 'https://my-instance.zitadel.cloud'; + process.env.OIDC_ROLE_CLAIM = 'urn:zitadel:iam:org:project:roles'; const url = buildLogoutUrl('http://localhost:3000/login'); expect(url).not.toBeNull(); expect(url).toContain('/oidc/v1/end_session'); From 0d74de6faa06e98fa1674f99445a8d8c667da22c Mon Sep 17 00:00:00 2001 From: cevheri Date: Sat, 7 Mar 2026 22:12:34 +0300 Subject: [PATCH 15/20] fix(api): validate 'data' field in PUT request for storage collection --- src/app/api/storage/[collection]/route.ts | 7 +++++++ src/lib/storage/providers/postgres.ts | 3 +++ 2 files changed, 10 insertions(+) diff --git a/src/app/api/storage/[collection]/route.ts b/src/app/api/storage/[collection]/route.ts index da1c028..13f81d8 100644 --- a/src/app/api/storage/[collection]/route.ts +++ b/src/app/api/storage/[collection]/route.ts @@ -37,6 +37,13 @@ export async function PUT( const body = await request.json(); + if (body.data === undefined || body.data === null) { + return NextResponse.json( + { error: 'Missing required field: data' }, + { status: 400 } + ); + } + await provider.setCollection( session.username, collection as StorageCollection, diff --git a/src/lib/storage/providers/postgres.ts b/src/lib/storage/providers/postgres.ts index 166f0e4..c11d710 100644 --- a/src/lib/storage/providers/postgres.ts +++ b/src/lib/storage/providers/postgres.ts @@ -34,6 +34,9 @@ export class PostgresStorageProvider implements ServerStorageProvider { connectionString: 
this.connectionString, max: 5, idleTimeoutMillis: 30000, + ssl: this.connectionString.includes('sslmode=disable') + ? false + : { rejectUnauthorized: false }, }); // Create table From 7d0462320a532f470c85ac81f0b0cc54ed0a7de1 Mon Sep 17 00:00:00 2001 From: cevheri Date: Sat, 7 Mar 2026 22:19:10 +0300 Subject: [PATCH 16/20] docs: add article on deploying projects with Koyeb.com --- docs/medium-koyeb-article-en.md | 215 ++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 docs/medium-koyeb-article-en.md diff --git a/docs/medium-koyeb-article-en.md b/docs/medium-koyeb-article-en.md new file mode 100644 index 0000000..04ebf85 --- /dev/null +++ b/docs/medium-koyeb-article-en.md @@ -0,0 +1,215 @@ +# Zero-Cost SaaS: How I Deployed My Project with Koyeb.com + +*A side-project developer's serverless discovery journey* + +**Tags:** #SaaS #CloudComputing #Koyeb #Startup #WebDevelopment #FreeTier #Serverless + +--- + +## I Had an Idea and Was Looking for the Right Tool + +Every developer has that moment: the idea is ready, the code is ready, everything works perfectly on localhost. Then the question comes — "Where do I deploy this?" + +Sound familiar? + +I've been actively using platforms like AWS, Azure, GCP, DigitalOcean, Heroku, Render, Railway, and Fly.io for years, and I continue to use many of them. Each has its strengths — AWS's flexibility, Azure's enterprise integrations, DigitalOcean's simplicity, Fly.io's edge computing approach... I love these platforms and run most of my projects on them. + +But this time I had a different need. For a SaaS application I was building as a side project, I was looking for a **free web service and database**, **quick setup**, and a **clean interface**. Something that didn't require heavy configuration, had no complex billing model, and could go live in minutes. + +That's when I came across Koyeb. 
I created an account, connected my GitHub repo, and within minutes my project — database included — was live. And my bill? Zero. + +In this post, I'll share my experience with Koyeb.com, what the platform offers, and why it's a great option for side projects and MVPs. + +--- + +## What Is Koyeb? + +Koyeb is a serverless Platform-as-a-Service (PaaS) built for developers who want to run applications without dealing with infrastructure management. In the simplest terms, it offers two paths: + +> **Push to GitHub, let Koyeb deploy it. Or provide a Docker image, and it runs.** + +First path: you connect your GitHub repo, and with every push, Koyeb automatically builds and deploys. Second path: you provide a Docker image from your container registry — like `ghcr.io/user/project:latest` — and Koyeb pulls and runs it directly. No server setup, no nginx configuration, no SSL certificates to manage... None of that. Automatic build, automatic deploy, automatic HTTPS. + +What really surprised me was the **deployment speed**. Even on the first deploy, it was incredibly fast. You provide the image, and within seconds the service is ready. I've waited minutes for builds and provisioning on other platforms — on Koyeb, it was done in the blink of an eye. + +What truly sets Koyeb apart is a major development from February 2026: **its acquisition by Mistral AI**. This was the French AI giant's first-ever acquisition, and it sends a strong signal of confidence in Koyeb's future. Mistral is positioning Koyeb as a core component of its AI cloud infrastructure. This means the platform won't just survive — it will grow with serious investment behind it. + +Who is Koyeb for? 
+ +- **Indie hackers**: Those who want to validate ideas quickly +- **Startup founders**: Those who want to launch an MVP at zero cost +- **Students**: Those who want to host portfolio projects +- **Side project developers**: Those who keep saying "I'll deploy it someday" +- **AI developers**: Those who want to run GPU-powered inference workloads + +--- + +## The Power of the Free Plan + +The first thing that drew me to Koyeb was the value of the Starter plan. + +Koyeb's Starter plan is **$0** and includes: + +- **1 web service** (512MB RAM, 0.1 vCPU, 2GB SSD) +- **1 managed PostgreSQL database** (1GB storage) +- **No time limit** — there's no trial period +- **Commercial use allowed** — even if your side project generates revenue +- **Custom domain support** — you can connect up to 5 domains +- **100GB monthly bandwidth** — included + +This is more than enough for an MVP or side project. My SaaS project ran smoothly with these resources. + +> *I entered my credit card during registration. A small amount was charged and refunded for verification — I didn't see any charges on my statement.* + +### What Makes Koyeb Stand Out for Side Projects? + +Every platform has its strengths. AWS and Azure are indispensable for enterprise projects, DigitalOcean is great for its simplicity, Fly.io excels at edge computing, and Railway and Render have made serious strides in developer experience. I actively use most of these platforms across different projects. + +What made Koyeb different for me was that it **offers both a web service and a PostgreSQL database together on the free plan**, with an extremely clean interface. Instead of setting up separate services and managing accounts across different platforms for a side project, I could handle everything from one place. + +--- + +## 4 Features That Impressed Me + +The free plan is nice, but what really kept me on Koyeb was the user experience. Here are the 4 features that impressed me the most: + +### 1.
Two Easy Deploy Paths: Git Push or Docker Image + +Koyeb offers two deployment paths, both extremely simple: + +**Path A — Git Push:** +1. Connect your GitHub repo +2. Select a branch, confirm build settings +3. Deploy — every `git push` now triggers an automatic deployment + +**Path B — Docker Image:** +1. Enter your container registry and image name (e.g., `ghcr.io/user/project:latest`) +2. Set port and environment variables +3. Deploy — Koyeb pulls the image and runs it + +I chose the Docker image path for my project. I provided my image from GitHub Container Registry, entered my environment variables, and hit deploy. You can also integrate with GitHub Actions or your own CI/CD pipeline, but for a simple side project, this level of simplicity is exactly right. + +My first deploy? It was live within **a few minutes** of providing the image. No exaggeration — the startup speed genuinely surprised me. + +### 2. Scale-to-Zero: Pay Only for What You Use + +This is, in my opinion, Koyeb's strongest feature. When your project isn't receiving traffic, the instance automatically goes to sleep. When a request comes in, it wakes up automatically. (Hugging Face Spaces and Render work similarly, but Koyeb still wins on simplicity and speed.) + +Koyeb does this in two tiers: + +- **Light Sleep**: The instance stays in memory, waking up within 200ms. Users feel no delay. (On other platforms, you really notice the cold start.) +- **Deep Sleep**: The instance shuts down completely, waking up in 1-5 seconds. For longer idle periods. + +This is especially great for side projects. Why consume resources when your project isn't getting traffic at 3 AM? With Scale-to-Zero, you only pay for actual usage. On the free plan, you're already paying nothing — but when you move to paid plans, this means real savings. + +### 3. Managed PostgreSQL + +If you're building a SaaS project, you almost certainly need a database. Koyeb provides one out of the box. (This became my favorite feature. 
Yugabyte, Neon, Supabase, Heroku, and MongoDB Atlas all offer free tiers too — but having everything in one place is wonderful.) + +You get your connection string, paste it into your app, done. + +### 4. Clean and Modern Dashboard + +Koyeb's dashboard is clean, intuitive, and modern. You can view real-time logs, track deployment history, and easily manage environment variables. (You can copy-paste your .env file — no need to enter variables one by one.) Larger cloud platforms naturally have more comprehensive dashboards because they offer a much wider range of services. Koyeb, by focusing on a narrower scope, keeps its interface extremely clean. I found exactly the information I needed, right where I needed it, for managing a side project. + +--- + +## Koyeb's Future: The Mistral AI Effect + +In February 2026, Mistral AI announced its acquisition of Koyeb. This was Mistral's **first-ever acquisition**. Koyeb's 13-person team and three co-founders joined Mistral's engineering team, led by CTO Timothée Lacroix. + +Why does this matter? + +**Strategic positioning**: Mistral is moving beyond being just an LLM company to become a full-stack AI cloud provider. The Mistral Compute platform announced in June 2025 will be accelerated by Koyeb's infrastructure. + +**AI-focused infrastructure**: Koyeb's serverless GPU support (L4, L40S, V100) aligns perfectly with Mistral's AI inference needs. We can expect more powerful GPU options and AI-native features in the future. + +**European AI independence**: Mistral is part of Europe's vision to build alternatives to US tech giants. Combined with its $1.4 billion data center investment in Sweden, Koyeb's European-based infrastructure carries strategic value. + +**Platform continuity commitment**: Koyeb's blog post made it clear — the existing platform and free plan will continue. The acquisition means growth, not shutdown. + +--- + +## Pricing: Pay as You Grow + +Koyeb's free Starter plan offers 1 web service + 1 PostgreSQL. 
If your project grows, there are Pro ($29/month), Scale ($299/month), and Enterprise options — but at the side project stage, you don't need to think about any of that. + +What mattered to me was this: no commitment, no hidden costs. You can start without thinking "What if it doesn't take off?" — because if it doesn't, it costs you absolutely nothing. + +--- + +## Practical Deploy Flow + +I want to show how easy it is to get a project live on Koyeb. Both paths take just a few steps: + +### With GitHub: + +**Step 1** — Go to [koyeb.com](https://www.koyeb.com), sign up. + +**Step 2** — Select "Create Web Service" > "GitHub", connect your repo. + +**Step 3** — Confirm branch and build settings. Koyeb auto-detects popular frameworks like Next.js, Node.js, Python, Go, and Rust. + +**Step 4** — Add your environment variables, hit "Deploy". + +### With Docker Image: + +**Step 1** — Select "Create Web Service" > "Docker". + +**Step 2** — Enter your image address, e.g.: `ghcr.io/user/project:latest` + +**Step 3** — Set port, environment variables, and health check settings, hit "Deploy". + +With both paths, your project is live at `xxx.koyeb.app` within minutes. Want to add PostgreSQL? One click from the dashboard via "Create Database". The connection string is auto-generated. That's it. + +--- + +## Is Koyeb Perfect? An Honest Assessment + +Every platform has strengths and weaknesses. I want to be honest while praising Koyeb: + +**Things to keep in mind:** + +- **Free tier resources are limited**: 512MB RAM and 0.1 vCPU aren't enough for heavy traffic. But it's free — ideal for MVPs and prototypes. +- **PostgreSQL uptime**: The free database goes to sleep when idle. If you need an always-on DB, you may need to upgrade to a paid plan. +- **Region limitations (free)**: On the free plan, you can only choose Frankfurt or Washington D.C. +- **Younger ecosystem**: It doesn't yet have as wide a service range as platforms that have been around for many years. 
But it's rapidly growing with Mistral's backing. + +**Standout strengths:** + +- Zero-cost start (web service + DB together) +- Remarkably simple deploy process +- Smart resource management with Scale-to-Zero +- Clean and focused dashboard +- Secure future with Mistral AI backing + +--- + +## Conclusion: The Right Tool for the Right Job + +Platforms like AWS, Azure, and GCP remain indispensable for my larger projects. But for a side project, for a quick and free start, Koyeb was exactly what I was looking for. + +I deployed my project at zero cost, in minutes. I didn't have to get my database from a separate service. All I needed was my code and my GitHub repo. And my bill reflected nothing. + +In 2026, cloud platform options are so rich that there's a solution for every need. Koyeb is a great option, especially for side projects, MVPs, and rapid prototyping. With its free web service and database bundled together, its clean interface, and easy setup, it's tailor-made for those "let me try this live" moments. + +If you have an idea, if you have an unfinished project, if you have code that works on localhost but has never gone live — **give it a shot**. It's free to try. + +And who knows, maybe the next big SaaS story will be yours. + +--- + +*This post is not sponsored. There are no discount codes or referral links. I used Koyeb for my own project and wanted to share my experience. 
As someone who loves open source: this is both a thank-you to Koyeb and, I hope, a helpful roadmap for you.* + +--- + +## Resources + +- [Koyeb Official Site](https://www.koyeb.com/) +- [Koyeb Pricing](https://www.koyeb.com/pricing) +- [Koyeb Docs](https://www.koyeb.com/docs) +- [Koyeb Scale-to-Zero](https://www.koyeb.com/docs/run-and-scale/scale-to-zero) +- [Mistral AI + Koyeb (TechCrunch)](https://techcrunch.com/2026/02/17/mistral-ai-buys-koyeb-in-first-acquisition-to-back-its-cloud-ambitions/) +- [Koyeb Blog: Joining Mistral AI](https://www.koyeb.com/blog/koyeb-is-joining-mistral-ai-to-build-the-future-of-ai-infrastructure) +- [Scale-to-Zero with Light Sleep](https://www.koyeb.com/blog/avoid-cold-starts-with-scale-to-zero-light-sleep) +- [Koyeb Regions](https://www.koyeb.com/docs/reference/regions) From 1d6bd63ec20e43c6bca21ffb3cee1be3839bb693 Mon Sep 17 00:00:00 2001 From: cevheri Date: Sat, 7 Mar 2026 22:39:14 +0300 Subject: [PATCH 17/20] refactor(login): remove popup SSO window in favor of standard redirect Popup-based SSO flow (window.open + window.moveTo/resizeTo) is unreliable because modern browsers block window manipulation APIs. Revert to the proven full-page redirect approach while keeping all UI improvements (split-panel layout, responsive design, security badges).
Co-Authored-By: Claude Opus 4.6 --- src/app/api/auth/oidc/callback/route.ts | 25 +------------ src/app/api/auth/oidc/login/route.ts | 20 ---------- src/app/login/login-form.tsx | 49 ++----------------------- 3 files changed, 5 insertions(+), 89 deletions(-) diff --git a/src/app/api/auth/oidc/callback/route.ts b/src/app/api/auth/oidc/callback/route.ts index 8252f9f..cb35623 100644 --- a/src/app/api/auth/oidc/callback/route.ts +++ b/src/app/api/auth/oidc/callback/route.ts @@ -63,20 +63,8 @@ export async function GET(request: Request) { const username = claims.email || claims.preferred_username || claims.sub || role; await login(role, username); - // Check if this was a popup login - const isPopup = cookieStore.get('oidc-mode')?.value === 'popup'; - - // Clean up cookies + // Clean up state cookie cookieStore.delete('oidc-state'); - cookieStore.delete('oidc-mode'); - - if (isPopup) { - // Close popup and let the parent window detect the auth via /api/auth/me - return new NextResponse( - '', - { headers: { 'Content-Type': 'text/html' } } - ); - } // Redirect based on role return NextResponse.redirect( @@ -88,17 +76,6 @@ export async function GET(request: Request) { console.error('OIDC error cause:', error.cause); } - // Clean up popup cookie on error too - const cookieStore2 = await cookies(); - const isPopup = cookieStore2.get('oidc-mode')?.value === 'popup'; - cookieStore2.delete('oidc-mode'); - - if (isPopup) { - return new NextResponse( - '', - { headers: { 'Content-Type': 'text/html' } } - ); - } return NextResponse.redirect(`${origin}/login?error=oidc_failed`); } } diff --git a/src/app/api/auth/oidc/login/route.ts b/src/app/api/auth/oidc/login/route.ts index efdd39e..4c61042 100644 --- a/src/app/api/auth/oidc/login/route.ts +++ b/src/app/api/auth/oidc/login/route.ts @@ -33,30 +33,10 @@ export async function GET(request: Request) { path: '/', }); - // Store popup mode flag so callback knows to close the window - const requestUrl = new URL(request.url); - if 
(requestUrl.searchParams.get('mode') === 'popup') { - cookieStore.set('oidc-mode', 'popup', { - httpOnly: true, - secure: process.env.NODE_ENV === 'production', - sameSite: 'lax', - maxAge: 300, - path: '/', - }); - } - return NextResponse.redirect(url.toString()); } catch (error) { console.error('OIDC login error:', error); const origin = getPublicOrigin(request); - const requestUrl = new URL(request.url); - const isPopup = requestUrl.searchParams.get('mode') === 'popup'; - if (isPopup) { - return new NextResponse( - '', - { headers: { 'Content-Type': 'text/html' } } - ); - } return NextResponse.redirect(`${origin}/login?error=oidc_config`); } } diff --git a/src/app/login/login-form.tsx b/src/app/login/login-form.tsx index c58ab1f..4295c0d 100644 --- a/src/app/login/login-form.tsx +++ b/src/app/login/login-form.tsx @@ -19,50 +19,6 @@ function LoginFormInner({ authProvider }: { authProvider: string }) { const searchParams = useSearchParams(); const oidcError = searchParams.get('error'); - const handleSSO = () => { - setIsLoading(true); - const isMobile = window.innerWidth < 768; - - if (isMobile) { - window.location.href = '/api/auth/oidc/login?mode=redirect'; - return; - } - - // Desktop: open SSO as a side-by-side window on the right half of the screen - const screenW = window.screen.availWidth; - const screenH = window.screen.availHeight; - const halfW = Math.round(screenW / 2); - - const ssoWindow = window.open( - '/api/auth/oidc/login?mode=popup', - 'sso-login', - `width=${halfW},height=${screenH},left=${halfW},top=0,toolbar=no,menubar=no,scrollbars=yes` - ); - - // Move main window to the left half - try { - window.moveTo(0, 0); - window.resizeTo(halfW, screenH); - } catch { - // Some browsers block window resize - that's fine - } - - // Listen for SSO window close - const checkSSO = setInterval(() => { - if (!ssoWindow || ssoWindow.closed) { - clearInterval(checkSSO); - setIsLoading(false); - router.refresh(); - // Check if auth succeeded - 
fetch('/api/auth/me').then(res => res.json()).then(data => { - if (data.authenticated) { - router.push(data.role === 'admin' ? '/admin' : '/'); - } - }).catch(() => {}); - } - }, 500); - }; - const handleLogin = async (e?: React.FormEvent, directEmail?: string, directPassword?: string) => { if (e) e.preventDefault(); const loginEmail = directEmail || email; @@ -235,7 +191,10 @@ function LoginFormInner({ authProvider }: { authProvider: string }) {