diff --git a/.env.example b/.env.example index b580f2d..82953e3 100644 --- a/.env.example +++ b/.env.example @@ -57,6 +57,24 @@ NEXT_PUBLIC_AUTH_PROVIDER=local # Okta: OIDC_ROLE_CLAIM=groups # Azure AD: OIDC_ROLE_CLAIM=roles +# ============================================ +# STORAGE PROVIDER (Optional) +# ============================================ +# Controls where application data is persisted. +# "local" (default) = browser localStorage only (zero config, great for dev) +# "sqlite" = SQLite file on server (persistent, single-node) +# "postgres" = PostgreSQL database (persistent, multi-node, enterprise) +# +# Note: NOT prefixed with NEXT_PUBLIC_ — server-side only, discovered at runtime +# via GET /api/storage/config endpoint. +STORAGE_PROVIDER=local + +# SQLite storage path (required when STORAGE_PROVIDER=sqlite) +# STORAGE_SQLITE_PATH=./data/libredb-storage.db + +# PostgreSQL connection URL (required when STORAGE_PROVIDER=postgres) +# STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb + # =========================================== # LLM Configuration (Strategy Pattern) # =========================================== diff --git a/CLAUDE.md b/CLAUDE.md index 033f599..1f76736 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,6 +6,11 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co LibreDB Studio is a web-based SQL IDE for cloud-native teams. It supports PostgreSQL, MySQL, SQLite, Oracle, SQL Server, MongoDB, Redis, and a demo mode with AI-powered query assistance. 
+## Github +* Repository: https://github.com/libredb/libredb-studio +* Container Registry: https://github.com/libredb/libredb-studio/pkgs/container/libredb-studio +* Docker Image: ghcr.io/libredb/libredb-studio:latest + ## Development Commands ```bash @@ -58,6 +63,7 @@ The project uses ESLint 9 for linting and `bun:test` for testing with `@testing- - **AI:** Multi-model support (Gemini, OpenAI, Ollama, Custom) - **Databases:** PostgreSQL (`pg`), MySQL (`mysql2`), SQLite (`better-sqlite3`), Oracle (`oracledb`), SQL Server (`mssql`), MongoDB (`mongodb`), Redis (`ioredis`) - **Auth:** JWT-based with `jose` library + OIDC SSO with `openid-client` (Auth0, Keycloak, Okta, Azure AD) +- **Storage:** Pluggable storage layer — localStorage (default), SQLite (`better-sqlite3`), or PostgreSQL (`pg`) ### Directory Structure @@ -69,6 +75,7 @@ src/ │ │ │ └── oidc/ # OIDC login + callback routes (PKCE, code exchange) │ │ ├── ai/ # AI endpoints (chat, nl2sql, explain, safety) │ │ ├── db/ # Query, schema, health, maintenance, transactions +│ │ ├── storage/ # Storage sync API (config, CRUD, migrate) │ │ └── admin/ # Fleet health, audit endpoints │ ├── admin/ # Admin dashboard (RBAC protected) │ └── login/ # Login page @@ -83,6 +90,15 @@ src/ │ └── ui/ # Shadcn/UI primitives ├── hooks/ # Custom React hooks └── lib/ + ├── storage/ # Storage abstraction layer + │ ├── index.ts # Barrel export + │ ├── types.ts # StorageData, ServerStorageProvider interfaces + │ ├── storage-facade.ts # Public sync API + CustomEvent dispatch + │ ├── local-storage.ts # Pure localStorage CRUD + │ ├── factory.ts # Env-based provider factory (singleton) + │ └── providers/ + │ ├── sqlite.ts # better-sqlite3 backend + │ └── postgres.ts # pg backend ├── db/ # Database provider module (Strategy Pattern) │ ├── providers/ │ │ ├── sql/ # SQL providers (postgres, mysql, sqlite, oracle, mssql) @@ -133,7 +149,12 @@ e2e/ # Playwright E2E tests (browser) 4. **API Routes:** All backend logic in `src/app/api/`. 
Protected routes require valid JWT. Public routes: `/login`, `/api/auth`, `/api/db/health` -5. **Client State:** LocalStorage for connections, query history, and saved queries (`src/lib/storage.ts`) +5. **Storage Abstraction:** `src/lib/storage/` module provides pluggable persistence: + - **Local** (default): Browser localStorage, zero config + - **SQLite**: `better-sqlite3` file DB for single-node persistent storage + - **PostgreSQL**: `pg` for multi-node enterprise storage + - Write-through cache: localStorage always serves reads; `useStorageSync` hook pushes mutations to server (debounced) + - Controlled by `STORAGE_PROVIDER` env var (server-side only, discovered at runtime via `/api/storage/config`) 6. **Multi-Tab Workspace:** Each query tab has independent state (query, results, execution status) @@ -164,6 +185,11 @@ LLM_PROVIDER=gemini # gemini, openai, ollama, custom LLM_API_KEY= LLM_MODEL=gemini-2.5-flash LLM_API_URL= # For ollama/custom providers + +# Optional storage config (server-side only, not NEXT_PUBLIC_) +STORAGE_PROVIDER=local # local (default) | sqlite | postgres +STORAGE_SQLITE_PATH=./data/libredb-storage.db # SQLite file path +STORAGE_POSTGRES_URL=postgresql://... 
# PostgreSQL connection URL ``` ### Path Aliases diff --git a/Dockerfile index 4cd08df..26b7514 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,14 +49,20 @@ ENV NODE_OPTIONS="--max-old-space-size=384" COPY --from=builder /usr/src/app/public ./public -# Set the correct permission for prerender cache -RUN mkdir -p .next +# Set the correct permission for prerender cache and storage +RUN mkdir -p .next data # Automatically leverage output traces to reduce image size # https://nextjs.org/docs/advanced-features/output-file-tracing COPY --from=builder /usr/src/app/.next/standalone ./ COPY --from=builder /usr/src/app/.next/static ./.next/static +# Copy better-sqlite3 native binding for server storage support +COPY --from=builder /usr/src/app/node_modules/better-sqlite3 ./node_modules/better-sqlite3 +COPY --from=builder /usr/src/app/node_modules/bindings ./node_modules/bindings +COPY --from=builder /usr/src/app/node_modules/file-uri-to-path ./node_modules/file-uri-to-path +COPY --from=builder /usr/src/app/node_modules/prebuild-install ./node_modules/prebuild-install + # Create non-root user for security RUN addgroup --system --gid 1001 nodejs && \ adduser --system --uid 1001 nextjs && \ diff --git a/bun.lock index 6b5d1e2..2045ca7 100644 --- a/bun.lock +++ b/bun.lock @@ -37,6 +37,7 @@ "@tanstack/react-table": "^8.21.3", "@tanstack/react-virtual": "^3.13.13", "@xyflow/react": "^12.10.0", + "better-sqlite3": "^12.6.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "^1.1.1", @@ -78,6 +79,7 @@ "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.3.2", "@testing-library/user-event": "^14.6.1", + "@types/better-sqlite3": "^7.6.13", "@types/bun": "latest", "@types/node": "^20", "@types/pg": "^8.16.0", @@ -480,6 +482,8 @@ "@types/aria-query": ["@types/aria-query@5.0.4", "", {}, "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw=="], + "@types/better-sqlite3": 
["@types/better-sqlite3@7.6.13", "", { "dependencies": { "@types/node": "*" } }, "sha512-NMv9ASNARoKksWtsq/SHakpYAYnhBrQgGD8zkLYk/jaK8jUGn08CfEdTRgYhMypUQAfzSP8W6gNLe0q19/t4VA=="], + "@types/bun": ["@types/bun@1.3.5", "", { "dependencies": { "bun-types": "1.3.5" } }, "sha512-RnygCqNrd3srIPEWBd5LFeUYG7plCoH2Yw9WaZGyNmdTEei+gWaHqydbaIRkIkcbXwhBT94q78QljxN0Sk838w=="], "@types/d3-array": ["@types/d3-array@3.2.2", "", {}, "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw=="], @@ -660,6 +664,10 @@ "bcrypt-pbkdf": ["bcrypt-pbkdf@1.0.2", "", { "dependencies": { "tweetnacl": "^0.14.3" } }, "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w=="], + "better-sqlite3": ["better-sqlite3@12.6.2", "", { "dependencies": { "bindings": "^1.5.0", "prebuild-install": "^7.1.1" } }, "sha512-8VYKM3MjCa9WcaSAI3hzwhmyHVlH8tiGFwf0RlTsZPWJ1I5MkzjiudCo4KC4DxOaL/53A5B1sI/IbldNFDbsKA=="], + + "bindings": ["bindings@1.5.0", "", { "dependencies": { "file-uri-to-path": "1.0.0" } }, "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ=="], + "bl": ["bl@6.1.6", "", { "dependencies": { "@types/readable-stream": "^4.0.0", "buffer": "^6.0.3", "inherits": "^2.0.4", "readable-stream": "^4.2.0" } }, "sha512-jLsPgN/YSvPUg9UX0Kd73CXpm2Psg9FxMeCSXnk3WBO3CMT10JMwijubhGfHCnFu6TPn1ei3b975dxv7K2pWVg=="], "brace-expansion": ["brace-expansion@1.1.12", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg=="], @@ -692,6 +700,8 @@ "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + "chownr": ["chownr@1.1.4", "", {}, "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="], + 
"class-variance-authority": ["class-variance-authority@0.7.1", "", { "dependencies": { "clsx": "^2.1.1" } }, "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg=="], "classcat": ["classcat@5.0.5", "", {}, "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w=="], @@ -772,6 +782,10 @@ "decimal.js-light": ["decimal.js-light@2.5.1", "", {}, "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg=="], + "decompress-response": ["decompress-response@6.0.0", "", { "dependencies": { "mimic-response": "^3.1.0" } }, "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ=="], + + "deep-extend": ["deep-extend@0.6.0", "", {}, "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="], + "deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], "default-browser": ["default-browser@5.5.0", "", { "dependencies": { "bundle-name": "^4.1.0", "default-browser-id": "^5.0.0" } }, "sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw=="], @@ -818,6 +832,8 @@ "emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + "end-of-stream": ["end-of-stream@1.4.5", "", { "dependencies": { "once": "^1.4.0" } }, "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg=="], + "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], @@ -880,6 +896,8 @@ "events": 
["events@3.3.0", "", {}, "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="], + "expand-template": ["expand-template@2.0.3", "", {}, "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg=="], + "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], "fast-equals": ["fast-equals@5.4.0", "", {}, "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw=="], @@ -896,6 +914,8 @@ "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], + "file-uri-to-path": ["file-uri-to-path@1.0.0", "", {}, "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw=="], + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], @@ -908,6 +928,8 @@ "framer-motion": ["framer-motion@12.23.26", "", { "dependencies": { "motion-dom": "^12.23.23", "motion-utils": "^12.23.6", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA=="], + "fs-constants": ["fs-constants@1.0.0", "", {}, "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow=="], + "fsevents": ["fsevents@2.3.2", "", { 
"os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], @@ -932,6 +954,8 @@ "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], + "github-from-package": ["github-from-package@0.0.0", "", {}, "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="], + "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], "globals": ["globals@14.0.0", "", {}, "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], @@ -982,6 +1006,8 @@ "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + "ini": ["ini@1.3.8", "", {}, "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="], + "input-otp": ["input-otp@1.4.2", "", { "peerDependencies": { "react": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-l3jWwYNvrEa6NTCt7BECfCm48GvwuZzkoeG3gBL2w4CHeOXW3eKFmf9UNYkNfYc3mxMrthMnxjIE07MT0zLBQA=="], "internal-slot": ["internal-slot@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw=="], @@ -1164,12 +1190,16 @@ "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + "mimic-response": ["mimic-response@3.1.0", "", {}, "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ=="], + "min-indent": ["min-indent@1.0.1", "", {}, "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg=="], "minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="], "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], + "mkdirp-classic": ["mkdirp-classic@0.5.3", "", {}, "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="], + "monaco-editor": ["monaco-editor@0.55.1", "", { "dependencies": { "dompurify": "3.2.7", "marked": "14.0.0" } }, "sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A=="], "mongodb": ["mongodb@7.0.0", "", { "dependencies": { "@mongodb-js/saslprep": "^1.3.0", "bson": "^7.0.0", "mongodb-connection-string-url": "^7.0.0" }, "peerDependencies": { "@aws-sdk/credential-providers": "^3.806.0", "@mongodb-js/zstd": "^7.0.0", "gcp-metadata": "^7.0.1", "kerberos": "^7.0.0", "mongodb-client-encryption": ">=7.0.0 <7.1.0", "snappy": "^7.3.2", "socks": "^2.8.6" }, "optionalPeers": ["@aws-sdk/credential-providers", "@mongodb-js/zstd", "gcp-metadata", "kerberos", "mongodb-client-encryption", "snappy", "socks"] }, "sha512-vG/A5cQrvGGvZm2mTnCSz1LUcbOPl83hfB6bxULKQ8oFZauyox/2xbZOoGNl+64m8VBrETkdGCDBdOsCr3F3jg=="], @@ -1194,6 +1224,8 @@ "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], + "napi-build-utils": ["napi-build-utils@2.0.0", "", {}, 
"sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA=="], + "napi-postinstall": ["napi-postinstall@0.3.4", "", { "bin": { "napi-postinstall": "lib/cli.js" } }, "sha512-PHI5f1O0EP5xJ9gQmFGMS6IZcrVvTjpXjz7Na41gTE7eE2hK11lg04CECCYEEjdc17EV4DO+fkGEtt7TpTaTiQ=="], "native-duplexpair": ["native-duplexpair@1.0.0", "", {}, "sha512-E7QQoM+3jvNtlmyfqRZ0/U75VFgCls+fSkbml2MpgWkWyz3ox8Y58gNhfuziuQYGNNQAbFZJQck55LHCnCK6CA=="], @@ -1206,6 +1238,8 @@ "next-themes": ["next-themes@0.4.6", "", { "peerDependencies": { "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA=="], + "node-abi": ["node-abi@3.87.0", "", { "dependencies": { "semver": "^7.3.5" } }, "sha512-+CGM1L1CgmtheLcBuleyYOn7NWPVu0s0EJH2C4puxgEZb9h8QpR9G2dBfZJOAUhi7VQxuBPMd0hiISWcTyiYyQ=="], + "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], "oauth4webapi": ["oauth4webapi@3.8.5", "", {}, "sha512-A8jmyUckVhRJj5lspguklcl90Ydqk61H3dcU0oLhH3Yv13KpAliKTt5hknpGGPZSSfOwGyraNEFmofDYH+1kSg=="], @@ -1226,6 +1260,8 @@ "object.values": ["object.values@1.2.1", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA=="], + "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], + "open": ["open@10.2.0", "", { "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", "wsl-utils": "^0.1.0" } }, "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA=="], "openid-client": 
["openid-client@6.8.2", "", { "dependencies": { "jose": "^6.1.3", "oauth4webapi": "^3.8.4" } }, "sha512-uOvTCndr4udZsKihJ68H9bUICrriHdUVJ6Az+4Ns6cW55rwM5h0bjVIzDz2SxgOI84LKjFyjOFvERLzdTUROGA=="], @@ -1284,6 +1320,8 @@ "postgres-interval": ["postgres-interval@1.2.0", "", { "dependencies": { "xtend": "^4.0.0" } }, "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ=="], + "prebuild-install": ["prebuild-install@7.1.3", "", { "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", "napi-build-utils": "^2.0.0", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", "simple-get": "^4.0.0", "tar-fs": "^2.0.0", "tunnel-agent": "^0.6.0" }, "bin": { "prebuild-install": "bin.js" } }, "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug=="], + "prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], "pretty-format": ["pretty-format@27.5.1", "", { "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", "react-is": "^17.0.1" } }, "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ=="], @@ -1292,6 +1330,8 @@ "prop-types": ["prop-types@15.8.1", "", { "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg=="], + "pump": ["pump@3.0.4", "", { "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA=="], + "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], "queue-microtask": ["queue-microtask@1.2.3", "", {}, 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], @@ -1300,6 +1340,8 @@ "randexp": ["randexp@0.4.6", "", { "dependencies": { "discontinuous-range": "1.0.0", "ret": "~0.1.10" } }, "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ=="], + "rc": ["rc@1.2.8", "", { "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" }, "bin": { "rc": "./cli.js" } }, "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw=="], + "react": ["react@19.2.4", "", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="], "react-day-picker": ["react-day-picker@9.13.0", "", { "dependencies": { "@date-fns/tz": "^1.4.1", "date-fns": "^4.1.0", "date-fns-jalali": "^4.1.0-0" }, "peerDependencies": { "react": ">=16.8.0" } }, "sha512-euzj5Hlq+lOHqI53NiuNhCP8HWgsPf/bBAVijR50hNaY1XwjKjShAnIe8jm8RD2W9IJUvihDIZ+KrmqfFzNhFQ=="], @@ -1388,6 +1430,10 @@ "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], + "simple-concat": ["simple-concat@1.0.1", "", {}, "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q=="], + + "simple-get": ["simple-get@4.0.1", "", { "dependencies": { "decompress-response": "^6.0.0", "once": "^1.3.1", "simple-concat": "^1.0.0" } }, "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA=="], + "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, 
"sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], @@ -1444,6 +1490,10 @@ "tapable": ["tapable@2.3.0", "", {}, "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg=="], + "tar-fs": ["tar-fs@2.1.4", "", { "dependencies": { "chownr": "^1.1.1", "mkdirp-classic": "^0.5.2", "pump": "^3.0.0", "tar-stream": "^2.1.4" } }, "sha512-mDAjwmZdh7LTT6pNleZ05Yt65HC3E+NiQzl672vQG38jIrehtJk/J3mNwIg+vShQPcLF/LV7CMnDW6vjj6sfYQ=="], + + "tar-stream": ["tar-stream@2.2.0", "", { "dependencies": { "bl": "^4.0.3", "end-of-stream": "^1.4.1", "fs-constants": "^1.0.0", "inherits": "^2.0.3", "readable-stream": "^3.1.1" } }, "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ=="], + "tarn": ["tarn@3.0.2", "", {}, "sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ=="], "tedious": ["tedious@19.2.1", "", { "dependencies": { "@azure/core-auth": "^1.7.2", "@azure/identity": "^4.2.1", "@azure/keyvault-keys": "^4.4.0", "@js-joda/core": "^5.6.5", "@types/node": ">=18", "bl": "^6.1.4", "iconv-lite": "^0.7.0", "js-md4": "^0.3.2", "native-duplexpair": "^1.0.0", "sprintf-js": "^1.1.3" } }, "sha512-pk1Q16Yl62iocuQB+RWbg6rFUFkIyzqOFQ6NfysCltRvQqKwfurgj8v/f2X+CKvDhSL4IJ0cCOfCHDg9PWEEYA=="], @@ -1464,6 +1514,8 @@ "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + "tunnel-agent": ["tunnel-agent@0.6.0", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w=="], + "tw-animate-css": ["tw-animate-css@1.4.0", "", {}, "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ=="], 
"tweetnacl": ["tweetnacl@0.14.5", "", {}, "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA=="], @@ -1498,6 +1550,8 @@ "use-sync-external-store": ["use-sync-external-store@1.6.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w=="], + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + "utrie": ["utrie@1.0.2", "", { "dependencies": { "base64-arraybuffer": "^1.0.2" } }, "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw=="], "uuid": ["uuid@8.3.2", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="], @@ -1524,6 +1578,8 @@ "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], + "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], + "ws": ["ws@8.19.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg=="], "wsl-utils": ["wsl-utils@0.1.0", "", { "dependencies": { "is-wsl": "^3.1.0" } }, "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw=="], @@ -1626,20 +1682,30 @@ "next/postcss": ["postcss@8.4.31", "", { "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } }, "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ=="], + "node-abi/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, 
"sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "pretty-format/ansi-styles": ["ansi-styles@5.2.0", "", {}, "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA=="], "pretty-format/react-is": ["react-is@17.0.2", "", {}, "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w=="], "prop-types/react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="], + "rc/strip-json-comments": ["strip-json-comments@2.0.1", "", {}, "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ=="], + "sharp/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], + "tar-stream/bl": ["bl@4.1.0", "", { "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w=="], + + "tar-stream/readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + "@types/ssh2/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="], "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@5.0.4", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg=="], "@typescript-eslint/utils/@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, 
"sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], + "tar-stream/bl/buffer": ["buffer@5.7.1", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ=="], + "@typescript-eslint/typescript-estree/minimatch/brace-expansion/balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], } } diff --git a/docker-compose.yml b/docker-compose.yml index cdbf501..449369e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,4 +10,12 @@ services: - LLM_API_KEY=${LLM_API_KEY} - LLM_MODEL=${LLM_MODEL:-gemini-2.5-flash} - LLM_API_URL=${LLM_API_URL} + - STORAGE_PROVIDER=${STORAGE_PROVIDER:-local} + - STORAGE_SQLITE_PATH=${STORAGE_SQLITE_PATH:-/app/data/libredb-storage.db} + - STORAGE_POSTGRES_URL=${STORAGE_POSTGRES_URL} + volumes: + - storage-data:/app/data restart: always + +volumes: + storage-data: diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index e379016..cf8205e 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -150,9 +150,20 @@ Controlled by `NEXT_PUBLIC_AUTH_PROVIDER` (`local` | `oidc`). Both flows result Multi-statement queries execute sequentially via `POST /api/db/multi-query`. -### 4.4. Client State Management +### 4.4. 
Storage Abstraction Layer -- **LocalStorage** for persistent data: connections, query history, saved queries, schema snapshots, chart configs, masking config +- **Write-through cache architecture**: localStorage (L1 cache) + optional server storage (L2 persistent) +- **Three storage modes** controlled by `STORAGE_PROVIDER` env var: + - `local` (default): Browser localStorage only, zero configuration + - `sqlite`: Server-side SQLite file via `better-sqlite3` + - `postgres`: Server-side PostgreSQL via `pg` +- **`useStorageSync` hook** in Studio.tsx: discovers mode at runtime via `/api/storage/config`, pulls on mount, pushes mutations (debounced 500ms) +- **Migration**: First login auto-migrates localStorage to server; `libredb_server_migrated` flag prevents re-migration +- **Graceful degradation**: If server unreachable, localStorage continues working + +### 4.5. Client State Management + +- **Storage module** (`src/lib/storage/`) for persistent data: connections, query history, saved queries, schema snapshots, chart configs, audit log, masking config, threshold config - **React hooks** for UI state: tabs, active connection, execution status - **Custom hooks** extracted from Studio.tsx: `useAuth`, `useConnectionManager`, `useTabManager`, `useTransactionControl`, `useQueryExecution`, `useInlineEditing` @@ -165,6 +176,7 @@ src/ │ │ ├── auth/ # Login/logout/me + OIDC (PKCE, callback) │ │ ├── ai/ # Chat, NL2SQL, explain, safety │ │ ├── db/ # Query, schema, health, maintenance, transactions +│ │ ├── storage/ # Storage sync API (config, CRUD, migrate) │ │ └── admin/ # Fleet health, audit │ ├── admin/ # Admin dashboard (RBAC protected) │ └── login/ # Login page @@ -192,7 +204,12 @@ src/ ├── ssh/ # SSH tunnel support ├── auth.ts # JWT utilities ├── oidc.ts # OIDC utilities - └── storage.ts # LocalStorage management + └── storage/ # Storage abstraction layer + ├── index.ts # Barrel export + ├── storage-facade.ts # Public sync API + CustomEvent dispatch + ├── local-storage.ts 
# Pure localStorage CRUD + ├── factory.ts # Env-based provider factory + └── providers/ # SQLite + PostgreSQL backends ``` ## 6. Deployment diff --git a/docs/STORAGE_ARCHITECTURE.md b/docs/STORAGE_ARCHITECTURE.md new file mode 100644 index 0000000..6ed961d --- /dev/null +++ b/docs/STORAGE_ARCHITECTURE.md @@ -0,0 +1,564 @@ +# Storage Architecture — LibreDB Studio + +This document describes the **Storage Abstraction Layer**, a pluggable persistence system that allows LibreDB Studio to operate in two modes: + +- **Local mode** (default): Zero-config, all data lives in the browser's `localStorage`. Ideal for single-user / open-source usage. +- **Server mode**: Data is persisted to a server-side database (SQLite or PostgreSQL) with per-user scoping. Ideal for teams and enterprise deployments. + +Switching between modes requires **only one environment variable** — no code changes, no rebuild. + +--- + +## Table of Contents + +1. [Design Goals](#1-design-goals) +2. [Architecture Overview](#2-architecture-overview) +3. [Data Model](#3-data-model) +4. [Module Structure](#4-module-structure) +5. [Local Storage Layer](#5-local-storage-layer) +6. [Storage Facade](#6-storage-facade) +7. [Server Storage Providers](#7-server-storage-providers) +8. [API Routes](#8-api-routes) +9. [Write-Through Cache & Sync Hook](#9-write-through-cache--sync-hook) +10. [Migration Flow](#10-migration-flow) +11. [Configuration](#11-configuration) +12. [User Scoping & Security](#12-user-scoping--security) +13. [Docker Deployment](#13-docker-deployment) +14. [Adding a New Provider](#14-adding-a-new-provider) + +--- + +## 1. 
Design Goals + +| Goal | Approach | +|------|----------| +| **Zero breaking changes** | All 16+ consumer components keep the same synchronous `storage.*` API | +| **Zero-config default** | `localStorage` works out of the box — no database, no env vars needed | +| **Single image, all modes** | Runtime config via env var, not build-time `NEXT_PUBLIC_*` | +| **Per-user isolation** | Server storage scoped by JWT `username` — no cross-user leaks | +| **Graceful degradation** | If server is unreachable, `localStorage` continues to work | +| **Extensible** | Adding a new backend (e.g., MySQL, DynamoDB) requires one file implementing `ServerStorageProvider` | + +--- + +## 2. Architecture Overview + +``` +┌──────────────────────────────┐ +│ 16+ Consumer Components │ ← Unchanged, same sync API +│ storage.getConnections() │ +│ storage.saveConnection() │ +└──────────────┬───────────────┘ + │ sync read/write +┌──────────────▼───────────────┐ +│ Storage Facade │ ← localStorage read/write + CustomEvent dispatch +│ src/lib/storage/ │ +│ storage-facade.ts │ +└──────────────┬───────────────┘ + │ CustomEvent: 'libredb-storage-change' +┌──────────────▼───────────────┐ +│ useStorageSync Hook │ ← Mounted in Studio.tsx (server mode only) +│ src/hooks/ │ +│ use-storage-sync.ts │ +└──────────────┬───────────────┘ + │ fetch (debounced 500ms) +┌──────────────▼───────────────┐ +│ API Routes │ ← JWT auth + user scoping +│ /api/storage/* │ +└──────────────┬───────────────┘ + │ +┌──────────────▼───────────────┐ +│ ServerStorageProvider │ ← Strategy Pattern +│ ┌─────────┐ ┌────────────┐ │ +│ │ SQLite │ │ PostgreSQL │ │ +│ └─────────┘ └────────────┘ │ +└──────────────────────────────┘ +``` + +**Key insight:** `localStorage` is always the **rendering source** (L1 cache). The server database is the **persistent source of truth** (L2). The sync hook keeps them in sync via a write-through cache pattern. + +--- + +## 3. 
Data Model + +### 3.1 Collections + +All application state is organized into **9 collections**, each stored as a JSON blob: + +| Collection | Type | Description | Max Items | +|-----------|------|-------------|-----------| +| `connections` | `DatabaseConnection[]` | Saved database connections | — | +| `history` | `QueryHistoryItem[]` | Query execution history | 500 | +| `saved_queries` | `SavedQuery[]` | User-saved SQL/JSON queries | — | +| `schema_snapshots` | `SchemaSnapshot[]` | Schema diff snapshots | 50 | +| `saved_charts` | `SavedChartConfig[]` | Saved chart configurations | — | +| `active_connection_id` | `string \| null` | Currently active connection | — | +| `audit_log` | `AuditEvent[]` | Audit trail events | 1000 | +| `masking_config` | `MaskingConfig` | Data masking rules and RBAC | — | +| `threshold_config` | `ThresholdConfig[]` | Monitoring alert thresholds | — | + +### 3.2 Server Database Schema + +Both SQLite and PostgreSQL use the same logical schema — a single table with collection-based JSON blobs: + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, -- JWT username (email) + collection TEXT NOT NULL, -- 'connections', 'history', etc. 
+ data TEXT NOT NULL, -- JSON serialized + updated_at TEXT/TIMESTAMPTZ NOT NULL, -- Last modification time + PRIMARY KEY (user_id, collection) +); +``` + +This design is intentionally simple: +- **No schema migrations** needed when adding new collections +- **One row per user per collection** — efficient upsert +- **JSON blobs** keep the server storage schema-agnostic + +### 3.3 localStorage Keys + +Each collection maps to a `libredb_`-prefixed localStorage key: + +``` +connections → libredb_connections +history → libredb_history +saved_queries → libredb_saved_queries +schema_snapshots → libredb_schema_snapshots +saved_charts → libredb_saved_charts +active_connection_id → libredb_active_connection_id +audit_log → libredb_audit_log +masking_config → libredb_masking_config +threshold_config → libredb_threshold_config +``` + +--- + +## 4. Module Structure + +``` +src/lib/storage/ +├── index.ts # Barrel export — preserves @/lib/storage import path +├── types.ts # StorageData, StorageCollection, ServerStorageProvider +├── local-storage.ts # Pure localStorage CRUD (SSR-safe) +├── storage-facade.ts # Public storage object with domain methods +├── factory.ts # Env-based provider instantiation (singleton) +└── providers/ + ├── sqlite.ts # better-sqlite3 implementation + └── postgres.ts # pg (Pool) implementation + +src/hooks/ +└── use-storage-sync.ts # Write-through cache hook + +src/app/api/storage/ +├── config/route.ts # GET: storage mode discovery (public) +├── route.ts # GET: fetch all user data (auth required) +├── [collection]/route.ts # PUT: update single collection (auth required) +└── migrate/route.ts # POST: localStorage → server migration (auth required) +``` + +--- + +## 5. 
Local Storage Layer
+
+**File:** `src/lib/storage/local-storage.ts`
+
+Pure, side-effect-free localStorage CRUD with SSR safety:
+
+```typescript
+// All operations check isClient() before accessing localStorage
+export function readJSON<T>(collection: string): T | null;
+export function writeJSON(collection: string, data: unknown): void;
+export function readString(collection: string): string | null;
+export function writeString(collection: string, value: string): void;
+export function remove(collection: string): void;
+export function getKey(collection: string): string; // → 'libredb_' + collection
+```
+
+- Every function is guarded by `isClient()` — safe to call during SSR (returns `null` / no-op)
+- JSON parse failures return `null` instead of throwing
+
+---
+
+## 6. Storage Facade
+
+**File:** `src/lib/storage/storage-facade.ts`
+
+The public `storage` object provides the same **synchronous API** that all 16+ consumer components use. Every mutation method:
+
+1. Writes to `localStorage` (immediate)
+2. Dispatches a `CustomEvent('libredb-storage-change')` with the collection name and data
+
+```typescript
+// Example: saving a connection
+storage.saveConnection(conn);
+// 1. Reads existing connections from localStorage
+// 2. Upserts by ID
+// 3. Writes back to localStorage
+// 4. 
Dispatches CustomEvent({ collection: 'connections', data: updatedList }) +``` + +### Public API + +| Category | Methods | +|----------|---------| +| **Connections** | `getConnections()`, `saveConnection(conn)`, `deleteConnection(id)` | +| **History** | `getHistory()`, `addToHistory(item)`, `clearHistory()` | +| **Saved Queries** | `getSavedQueries()`, `saveQuery(query)`, `deleteSavedQuery(id)` | +| **Schema Snapshots** | `getSchemaSnapshots(connId?)`, `saveSchemaSnapshot(snap)`, `deleteSchemaSnapshot(id)` | +| **Charts** | `getSavedCharts()`, `saveChart(chart)`, `deleteChart(id)` | +| **Active Connection** | `getActiveConnectionId()`, `setActiveConnectionId(id)` | +| **Audit Log** | `getAuditLog()`, `saveAuditLog(events)` | +| **Masking Config** | `getMaskingConfig()`, `saveMaskingConfig(config)` | +| **Threshold Config** | `getThresholdConfig()`, `saveThresholdConfig(thresholds)` | + +All read methods are **synchronous** — they read from `localStorage` only. No network calls. + +--- + +## 7. 
Server Storage Providers
+
+### 7.1 Provider Interface
+
+**File:** `src/lib/storage/types.ts`
+
+```typescript
+interface ServerStorageProvider {
+  initialize(): Promise<void>;
+  getAllData(userId: string): Promise<Partial<StorageData>>;
+  getCollection<K extends StorageCollection>(
+    userId: string, collection: K
+  ): Promise<StorageData[K] | null>;
+  setCollection<K extends StorageCollection>(
+    userId: string, collection: K, data: StorageData[K]
+  ): Promise<void>;
+  mergeData(userId: string, data: Partial<StorageData>): Promise<void>;
+  isHealthy(): Promise<boolean>;
+  close(): Promise<void>;
+}
+```
+
+### 7.2 SQLite Provider
+
+**File:** `src/lib/storage/providers/sqlite.ts`
+**Package:** `better-sqlite3` (Node.js compatible, not `bun:sqlite`)
+
+| Feature | Detail |
+|---------|--------|
+| **WAL mode** | Enabled for concurrent read performance |
+| **Auto-create** | Directory and database file created on `initialize()` |
+| **Upsert** | `INSERT OR REPLACE INTO user_storage` |
+| **Transactions** | `mergeData()` wraps all inserts in a single transaction |
+| **Health check** | `SELECT 1 AS ok` |
+
+```env
+STORAGE_PROVIDER=sqlite
+STORAGE_SQLITE_PATH=./data/libredb-storage.db # default
+```
+
+### 7.3 PostgreSQL Provider
+
+**File:** `src/lib/storage/providers/postgres.ts`
+**Package:** `pg` (connection pool)
+
+| Feature | Detail |
+|---------|--------|
+| **Pool config** | max: 5, idleTimeoutMillis: 30000 |
+| **Upsert** | `INSERT ... 
ON CONFLICT (user_id, collection) DO UPDATE` | +| **Transactions** | `mergeData()` uses `BEGIN`/`COMMIT`/`ROLLBACK` with client checkout | +| **Health check** | `SELECT 1 AS ok` | + +```env +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/libredb +``` + +### 7.4 Factory + +**File:** `src/lib/storage/factory.ts` + +The factory uses the **Singleton pattern** — one provider instance per process, lazy-initialized on first access: + +```typescript +getStorageProviderType() // → 'local' | 'sqlite' | 'postgres' +isServerStorageEnabled() // → true if not 'local' +getStorageConfig() // → { provider, serverMode } +getStorageProvider() // → ServerStorageProvider | null (singleton) +closeStorageProvider() // → cleanup for testing +``` + +Provider classes are **dynamically imported** — SQLite and PostgreSQL dependencies are only loaded when their provider is selected. + +--- + +## 8. API Routes + +All routes (except `/config`) require JWT authentication. The authenticated user's `username` (email) is used as the `user_id` for storage scoping. + +| Endpoint | Method | Auth | Purpose | +|----------|--------|------|---------| +| `/api/storage/config` | GET | Public | Runtime storage mode discovery | +| `/api/storage` | GET | JWT | Fetch all collections for the authenticated user | +| `/api/storage/[collection]` | PUT | JWT | Update a single collection | +| `/api/storage/migrate` | POST | JWT | Merge localStorage dump into server storage | + +### Response Examples + +**GET /api/storage/config** +```json +{ "provider": "sqlite", "serverMode": true } +``` + +**GET /api/storage** +```json +{ + "connections": [{ "id": "c1", "name": "Prod DB", ... }], + "history": [{ "id": "h1", "query": "SELECT ...", ... }], + ... +} +``` + +**PUT /api/storage/connections** +```json +// Request: { "data": [{ "id": "c1", "name": "Prod DB", ... 
}] } +// Response: { "ok": true } +``` + +**POST /api/storage/migrate** +```json +// Request: { "connections": [...], "history": [...], ... } +// Response: { "ok": true, "migrated": ["connections", "history"] } +``` + +When `STORAGE_PROVIDER=local`, all data routes return `404 Not Found` (config route always works). + +--- + +## 9. Write-Through Cache & Sync Hook + +**File:** `src/hooks/use-storage-sync.ts` + +The hook is mounted in `Studio.tsx` after `useAuth()` and orchestrates all client-server synchronization. + +### Sync States + +```typescript +interface StorageSyncState { + isServerMode: boolean; // Server storage active? + isSyncing: boolean; // Currently transferring data? + lastSyncedAt: Date | null; // Last successful sync timestamp + syncError: string | null; // Last error message (null = healthy) +} +``` + +### Lifecycle + +``` +App Mount + │ + ├─ GET /api/storage/config + │ ├─ serverMode: false → done (localStorage only) + │ └─ serverMode: true ──┐ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Check libredb_server_migrated flag │ + │ │ ├─ Not migrated → POST /api/storage/migrate│ + │ │ │ (send all localStorage → server merge) │ + │ │ │ Set flag in localStorage │ + │ │ └─ Already migrated → skip │ + │ └──────────────────────┬──────────────────────┘ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Pull: GET /api/storage │ + │ │ → Write server data into localStorage │ + │ │ → Components re-render from localStorage │ + │ └──────────────────────┬──────────────────────┘ + │ │ + │ ┌──────────────────────▼──────────────────────┐ + │ │ Listen: 'libredb-storage-change' events │ + │ │ → Collect pending collections │ + │ │ → Debounce 500ms │ + │ │ → PUT /api/storage/[collection] for each │ + │ └─────────────────────────────────────────────┘ + │ + ▼ (ongoing) +``` + +### Push Behavior (Debounced) + +When any `storage.*` mutation fires: + +1. Facade writes to `localStorage` (immediate, synchronous) +2. 
Facade dispatches `CustomEvent('libredb-storage-change', { collection, data })` +3. Hook captures event, adds collection to pending set +4. After 500ms of no new mutations, hook flushes: + - Reads each pending collection from `localStorage` + - Sends `PUT /api/storage/[collection]` for each + +### Graceful Degradation + +- If `/api/storage/config` fails → stays in localStorage-only mode +- If push fails → logs warning, sets `syncError`, does **not** block the UI +- Components always read from `localStorage` — no loading states for storage + +--- + +## 10. Migration Flow + +When a user first enables server mode (or a new user logs in for the first time): + +``` +1. Hook detects serverMode = true +2. Checks localStorage('libredb_server_migrated') flag +3. If not migrated: + a. Reads all 9 collections from localStorage + b. POST /api/storage/migrate with full payload + c. Server calls provider.mergeData() — ID-based deduplication + d. Sets 'libredb_server_migrated' flag in localStorage +4. Pull: GET /api/storage → overwrite localStorage with server data +5. Subsequent mutations sync normally via push +``` + +This ensures existing localStorage data is preserved when transitioning to server mode. + +--- + +## 11. Configuration + +### Environment Variables + +| Variable | Default | Required | Description | +|----------|---------|----------|-------------| +| `STORAGE_PROVIDER` | `local` | No | Storage backend: `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | `./data/libredb-storage.db` | No | Path to SQLite database file | +| `STORAGE_POSTGRES_URL` | — | If `postgres` | PostgreSQL connection string | + +### Why Not `NEXT_PUBLIC_*`? + +Next.js `NEXT_PUBLIC_*` variables are **inlined at build time** as static strings. This means: +- Every storage mode would require a separate Docker build +- Cannot change storage mode without rebuilding + +Instead, the client discovers the storage mode at **runtime** via `GET /api/storage/config`. 
One Docker image supports all modes. + +--- + +## 12. User Scoping & Security + +### Per-User Isolation + +Every row in `user_storage` is scoped by `user_id`: + +``` +(admin@libredb.org, connections) → [{"id":"c1", "name":"Prod DB"...}] +(admin@libredb.org, history) → [{"id":"h1", "query":"SELECT..."...}] +(user@libredb.org, connections) → [{"id":"c2", "name":"Dev DB"...}] +``` + +- `user_id` = JWT session `username` (email address) +- **Client never sends `user_id`** — server always extracts from JWT cookie +- Every query includes `WHERE user_id = $username` — no cross-user access possible + +### Authentication + +- `/api/storage/config` is **public** — returns only `{ provider, serverMode }`, no sensitive data +- All other `/api/storage/*` routes require a valid JWT session via `getSession()` +- Unauthorized requests receive `401 Unauthorized` + +### OIDC Users + +OIDC users (Auth0, Keycloak, Okta, Azure AD) have their `preferred_username` or email claim mapped to the same `username` field used as `user_id`. + +--- + +## 13. Docker Deployment + +### SQLite Mode + +```yaml +# docker-compose.yml +services: + libredb-studio: + environment: + STORAGE_PROVIDER: sqlite + STORAGE_SQLITE_PATH: /app/data/libredb-storage.db + volumes: + - storage-data:/app/data + +volumes: + storage-data: +``` + +The Dockerfile includes `better-sqlite3` native bindings and creates the `/app/data` directory. + +### PostgreSQL Mode + +```yaml +services: + libredb-studio: + environment: + STORAGE_PROVIDER: postgres + STORAGE_POSTGRES_URL: postgresql://user:pass@db:5432/libredb + depends_on: + - db + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: libredb + POSTGRES_USER: user + POSTGRES_PASSWORD: pass +``` + +No volume needed on the app container — data lives in PostgreSQL. + +--- + +## 14. 
Adding a New Provider
+
+To add a new storage backend (e.g., MySQL, DynamoDB):
+
+### Step 1: Implement the Interface
+
+Create `src/lib/storage/providers/your-provider.ts`:
+
+```typescript
+import type { ServerStorageProvider, StorageData, StorageCollection } from '../types';
+
+export class YourStorageProvider implements ServerStorageProvider {
+  async initialize(): Promise<void> { /* create table */ }
+  async getAllData(userId: string): Promise<Partial<StorageData>> { /* ... */ }
+  async getCollection<K extends StorageCollection>(
+    userId: string, collection: K
+  ): Promise<StorageData[K] | null> { /* ... */ }
+  async setCollection<K extends StorageCollection>(
+    userId: string, collection: K, data: StorageData[K]
+  ): Promise<void> { /* upsert */ }
+  async mergeData(
+    userId: string, data: Partial<StorageData>
+  ): Promise<void> { /* batch upsert in transaction */ }
+  async isHealthy(): Promise<boolean> { /* SELECT 1 */ }
+  async close(): Promise<void> { /* cleanup */ }
+}
+```
+
+### Step 2: Register in Factory
+
+Update `src/lib/storage/factory.ts`:
+
+```typescript
+// Add to StorageProviderType
+type StorageProviderType = 'local' | 'sqlite' | 'postgres' | 'your-provider';
+
+// Add dynamic import in getStorageProvider()
+case 'your-provider': {
+  const { YourStorageProvider } = await import('./providers/your-provider');
+  instance = new YourStorageProvider(process.env.STORAGE_YOUR_URL!);
+  break;
+}
+```
+
+### Step 3: Add Tests
+
+Create `tests/unit/lib/storage/providers/your-provider.test.ts` with mocked driver.
+
+That's it — no changes needed to the facade, API routes, sync hook, or any consumer components.
diff --git a/docs/STORAGE_QUICK_SETUP.md b/docs/STORAGE_QUICK_SETUP.md
new file mode 100644
index 0000000..32edb91
--- /dev/null
+++ b/docs/STORAGE_QUICK_SETUP.md
@@ -0,0 +1,403 @@
+# Storage Quick Setup Guide
+
+LibreDB Studio supports three storage modes. Pick the one that fits your use case and follow the steps below.
+
+> For a deep dive into the architecture, see [STORAGE_ARCHITECTURE.md](./STORAGE_ARCHITECTURE.md).
+
+---
+
+## Which Mode Should I Use? 
+ +| Mode | Best For | Persistence | Multi-User | Setup | +|------|----------|-------------|------------|-------| +| **Local** (default) | Solo dev, quick start | Browser only | No | Zero config | +| **SQLite** | Small teams, single server | Server file | Yes | 1 env var | +| **PostgreSQL** | Enterprise, multi-node | External DB | Yes | 2 env vars | + +--- + +## 1. Local Mode (Default) + +No configuration needed. All data stays in the browser's `localStorage`. + +```bash +# Just start the app — that's it +bun dev +``` + +**What you get:** +- Instant start, no database required +- Data persists across page reloads +- Data is lost if browser storage is cleared or you switch browsers/devices + +**When to move on:** When you need data to survive across devices, browsers, or team members. + +--- + +## 2. SQLite Mode + +A single file on the server. Great for self-hosted single-node deployments. + +### Minimal Setup (Just One Env Var) + +```bash +# .env.local +STORAGE_PROVIDER=sqlite +``` + +```bash +bun dev +``` + +That's it. When `STORAGE_SQLITE_PATH` is not provided, the default path is `./data/libredb-storage.db`. + +### What Happens Automatically + +On the first API request, the SQLite provider: + +1. **Creates the directory** — `./data/` (or whatever parent directory the path points to) is created recursively if it doesn't exist +2. **Creates the database file** — `libredb-storage.db` is created by `better-sqlite3` +3. **Enables WAL mode** — Write-Ahead Logging for better concurrent read performance +4. **Creates the table** — `user_storage` table with the schema below + +No manual setup, no migrations, no SQL scripts needed. + +### Custom Path + +If you want the database file in a different location: + +```bash +# .env.local +STORAGE_PROVIDER=sqlite +STORAGE_SQLITE_PATH=/var/lib/libredb/storage.db +``` + +The directory must be writable by the app process. The directory and file are created automatically. 
+ +### Docker + +```yaml +# docker-compose.yml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + ports: + - "3000:3000" + environment: + - STORAGE_PROVIDER=sqlite + - STORAGE_SQLITE_PATH=/app/data/libredb-storage.db + volumes: + - storage-data:/app/data + +volumes: + storage-data: +``` + +```bash +docker-compose up -d +``` + +> **Volume is essential.** Without it, data is lost when the container restarts. + +### Verify + +```bash +curl http://localhost:3000/api/storage/config +# → {"provider":"sqlite","serverMode":true} +``` + +### Manual Table Creation (Optional) + +The table is auto-created, but if you prefer to create it yourself (e.g., for auditing or version control): + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + PRIMARY KEY (user_id, collection) +); + +-- Recommended: enable WAL mode for concurrent read performance +PRAGMA journal_mode = WAL; +``` + +--- + +## 3. PostgreSQL Mode + +Recommended for production, teams, and high-availability deployments. + +> **Important:** Unlike SQLite, `STORAGE_POSTGRES_URL` is **required**. There is no default value. If you set `STORAGE_PROVIDER=postgres` without providing a connection string, the app will throw an error on the first storage request: +> ``` +> Error: STORAGE_POSTGRES_URL is required when STORAGE_PROVIDER=postgres +> ``` + +### Local Development + +```bash +# Start a PostgreSQL instance (if you don't have one) +docker run -d --name libredb-pg \ + -e POSTGRES_DB=libredb \ + -e POSTGRES_USER=libredb \ + -e POSTGRES_PASSWORD=secret \ + -p 5432:5432 \ + postgres:16-alpine +``` + +```bash +# .env.local +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://libredb:secret@localhost:5432/libredb +``` + +```bash +bun dev +``` + +### What Happens Automatically + +On the first API request, the PostgreSQL provider: + +1. 
**Creates a connection pool** — max 5 connections, 30s idle timeout +2. **Creates the table** — `user_storage` table with the schema below via `CREATE TABLE IF NOT EXISTS` + +The database itself must already exist. The **table** is auto-created, but the **database** is not. + +### Required Privileges + +The PostgreSQL user specified in `STORAGE_POSTGRES_URL` needs: + +| Privilege | Why | +|-----------|-----| +| `CREATE TABLE` | Auto-create `user_storage` on first request (only needed once) | +| `INSERT` | Save user data | +| `UPDATE` | Update existing data | +| `SELECT` | Read user data | + +If your DBA restricts `CREATE TABLE`, you can create the table manually (see below) and the user only needs `INSERT`/`UPDATE`/`SELECT`. + +### Docker Compose (App + PostgreSQL) + +```yaml +# docker-compose.yml +services: + app: + image: ghcr.io/libredb/libredb-studio:latest + ports: + - "3000:3000" + environment: + - STORAGE_PROVIDER=postgres + - STORAGE_POSTGRES_URL=postgresql://libredb:secret@db:5432/libredb + depends_on: + db: + condition: service_healthy + + db: + image: postgres:16-alpine + environment: + - POSTGRES_DB=libredb + - POSTGRES_USER=libredb + - POSTGRES_PASSWORD=secret + volumes: + - pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U libredb"] + interval: 5s + timeout: 3s + retries: 5 + +volumes: + pgdata: +``` + +```bash +docker-compose up -d +``` + +### Using an Existing PostgreSQL + +Just set the connection string — the table is auto-created: + +```bash +STORAGE_PROVIDER=postgres +STORAGE_POSTGRES_URL=postgresql://user:pass@your-pg-host:5432/your_db +``` + +### Verify + +```bash +curl http://localhost:3000/api/storage/config +# → {"provider":"postgres","serverMode":true} +``` + +### Manual Table Creation (Optional) + +The table is auto-created on first request. 
However, if you prefer to create it yourself — for example, in environments where the app user doesn't have `CREATE TABLE` privileges, or you want to track schema changes in version control: + +```sql +-- PostgreSQL +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, collection) +); + +-- Optional: index for faster lookups by user +CREATE INDEX IF NOT EXISTS idx_user_storage_user_id ON user_storage (user_id); +``` + +#### Minimal Privileges (When Table Already Exists) + +If a DBA creates the table, the app user only needs: + +```sql +-- Grant only data access (no DDL needed) +GRANT SELECT, INSERT, UPDATE ON user_storage TO libredb_app; +``` + +--- + +## Migration: Local to Server + +When you switch from local mode to SQLite or PostgreSQL, **existing browser data is automatically migrated** on first login: + +1. User opens the app in server mode +2. The sync hook detects it's the first time (no `libredb_server_migrated` flag) +3. All localStorage data is sent to the server via `POST /api/storage/migrate` +4. Server merges the data (ID-based deduplication — no duplicates) +5. A flag is set in localStorage to prevent re-migration +6. From this point on, the server is the source of truth + +**No manual steps required.** Just change the env var and restart. + +> If multiple users were sharing a browser in local mode, only the data from the user who migrates first will be sent. Each user's server storage is isolated by their login email. + +--- + +## Environment Variables Reference + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `STORAGE_PROVIDER` | No | `local` | `local`, `sqlite`, or `postgres` | +| `STORAGE_SQLITE_PATH` | No | `./data/libredb-storage.db` | Path to SQLite file. Directory and file are auto-created. 
| +| `STORAGE_POSTGRES_URL` | **Yes** (postgres mode) | — | PostgreSQL connection string. **No default — app will error without it.** | + +> These are **server-side only** variables (no `NEXT_PUBLIC_` prefix). The client discovers the mode at runtime via `GET /api/storage/config`. This means one Docker image works for all modes. + +### Default Behavior Summary + +| Mode | Config needed | What's auto-created | +|------|--------------|---------------------| +| `local` | Nothing | N/A (browser localStorage) | +| `sqlite` | Just `STORAGE_PROVIDER=sqlite` | Directory + DB file + WAL mode + table | +| `postgres` | `STORAGE_PROVIDER=postgres` + `STORAGE_POSTGRES_URL` | Table only (database must exist) | + +--- + +## Health Check + +Check if the storage backend is reachable: + +```bash +# Storage mode info (always works, no auth needed) +curl http://localhost:3000/api/storage/config + +# Full data fetch (requires auth cookie) +curl -b cookies.txt http://localhost:3000/api/storage +``` + +--- + +## Troubleshooting + +### "Data not syncing to server" + +1. Check storage mode: `curl http://localhost:3000/api/storage/config` +2. Make sure the response shows `"serverMode": true` +3. 
Check browser console for sync errors (look for `[StorageSync]` prefixed logs) + +### SQLite: "SQLITE_CANTOPEN" + +- The directory in `STORAGE_SQLITE_PATH` must be writable by the app process +- In Docker, make sure the volume is mounted correctly + +### PostgreSQL: "STORAGE_POSTGRES_URL is required" + +- You set `STORAGE_PROVIDER=postgres` but didn't provide `STORAGE_POSTGRES_URL` +- Unlike SQLite, PostgreSQL has **no default** — a connection string is always required +- Fix: add `STORAGE_POSTGRES_URL=postgresql://user:pass@host:5432/dbname` to your env + +### PostgreSQL: "Connection refused" + +- Verify `STORAGE_POSTGRES_URL` is correct and the database is reachable +- In Docker Compose, use the service name (`db`) as the host, not `localhost` +- Check that the PostgreSQL container is healthy: `docker-compose ps` + +### "Data disappeared after switching modes" + +- Switching from server mode **back** to local mode doesn't pull data from the server +- Local mode only reads from localStorage +- To recover: switch back to server mode, the data is still in the database + +### "Duplicate data after migration" + +- Migration uses ID-based deduplication — this shouldn't happen +- If it does, check if the same user logged in from multiple browsers before migration completed + +--- + +## Database Schema Reference + +Both SQLite and PostgreSQL use the same single-table design. The table is auto-created on first request, but the full DDL is provided here for reference. 
+ +### SQLite + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + PRIMARY KEY (user_id, collection) +); + +PRAGMA journal_mode = WAL; +``` + +### PostgreSQL + +```sql +CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, collection) +); + +-- Optional: index for faster lookups by user +CREATE INDEX IF NOT EXISTS idx_user_storage_user_id ON user_storage (user_id); +``` + +### Schema Explanation + +| Column | Type | Description | +|--------|------|-------------| +| `user_id` | TEXT | User's email from JWT token (e.g., `admin@libredb.org`) | +| `collection` | TEXT | Data category: `connections`, `history`, `saved_queries`, `schema_snapshots`, `saved_charts`, `active_connection_id`, `audit_log`, `masking_config`, `threshold_config` | +| `data` | TEXT | JSON-serialized collection data | +| `updated_at` | TEXT / TIMESTAMPTZ | Last modification timestamp | + +Each row stores **one user's one collection** as a JSON blob. Adding a new collection type requires no schema changes — just a new row. + +--- + +## What's Next? 
+ +- [STORAGE_ARCHITECTURE.md](./STORAGE_ARCHITECTURE.md) — Deep dive into the write-through cache, sync hook, and provider internals +- [ARCHITECTURE.md](./ARCHITECTURE.md) — Overall system architecture +- [OIDC_SETUP.md](./OIDC_SETUP.md) — SSO configuration (pairs well with server storage for team deployments) diff --git a/package.json b/package.json index 152daef..5e77a68 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "libredb-studio", - "version": "0.7.1", + "version": "0.8.0", "private": true, "scripts": { "dev": "next dev", @@ -8,7 +8,7 @@ "start": "next start", "lint": "eslint .", "typecheck": "tsc --noEmit", - "test": "bun test tests/unit tests/api tests/integration tests/hooks && bun run test:components", + "test": "bun test tests/unit tests/api tests/integration && bun test tests/hooks && bun run test:components", "test:unit": "bun test tests/unit", "test:integration": "bun test tests/integration", "test:hooks": "bun test tests/hooks", @@ -16,8 +16,8 @@ "test:components": "bash tests/run-components.sh", "test:components:coverage": "bash tests/run-components.sh --coverage --coverage-reporter=lcov --coverage-dir=coverage/components", "test:e2e": "bunx playwright test", - "test:coverage:core": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/core tests/unit tests/api tests/integration tests/hooks", - "test:coverage": "rm -rf coverage && bun run test:coverage:core && bun run test:components:coverage && node scripts/merge-lcov.mjs coverage/core/lcov.info coverage/components/lcov.info coverage/lcov.info", + "test:coverage:core": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/core tests/unit tests/api tests/integration && bun test --coverage --coverage-reporter=lcov --coverage-reporter=text --coverage-dir=coverage/hooks tests/hooks", + "test:coverage": "rm -rf coverage && bun run test:coverage:core && bun run test:components:coverage && node 
scripts/merge-lcov.mjs coverage/core/lcov.info coverage/hooks/lcov.info coverage/components/lcov.info coverage/lcov.info", "test:coverage-html": "bun run test:coverage && genhtml coverage/lcov.info --output-directory coverage/html && echo '\n✅ Open coverage/html/index.html in your browser'" }, "dependencies": { @@ -53,6 +53,7 @@ "@tanstack/react-table": "^8.21.3", "@tanstack/react-virtual": "^3.13.13", "@xyflow/react": "^12.10.0", + "better-sqlite3": "^12.6.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "^1.1.1", @@ -94,6 +95,7 @@ "@testing-library/jest-dom": "^6.9.1", "@testing-library/react": "^16.3.2", "@testing-library/user-event": "^14.6.1", + "@types/better-sqlite3": "^7.6.13", "@types/bun": "latest", "@types/node": "^20", "@types/pg": "^8.16.0", diff --git a/src/app/api/storage/[collection]/route.ts b/src/app/api/storage/[collection]/route.ts new file mode 100644 index 0000000..da1c028 --- /dev/null +++ b/src/app/api/storage/[collection]/route.ts @@ -0,0 +1,47 @@ +/** + * PUT /api/storage/[collection] + * Updates a single storage collection for the authenticated user. + * Only works when server storage is enabled. 
+ */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; +import { STORAGE_COLLECTIONS, type StorageCollection } from '@/lib/storage/types'; + +export async function PUT( + request: NextRequest, + { params }: { params: Promise<{ collection: string }> } +) { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const { collection } = await params; + + if (!STORAGE_COLLECTIONS.includes(collection as StorageCollection)) { + return NextResponse.json( + { error: `Invalid collection: ${collection}` }, + { status: 400 } + ); + } + + const body = await request.json(); + + await provider.setCollection( + session.username, + collection as StorageCollection, + body.data + ); + + return NextResponse.json({ ok: true }); +} diff --git a/src/app/api/storage/config/route.ts b/src/app/api/storage/config/route.ts new file mode 100644 index 0000000..d3e86cf --- /dev/null +++ b/src/app/api/storage/config/route.ts @@ -0,0 +1,12 @@ +/** + * GET /api/storage/config + * Returns storage configuration (public endpoint, no auth required). + * Client uses this to discover if server-side storage is enabled at runtime. + */ + +import { NextResponse } from 'next/server'; +import { getStorageConfig } from '@/lib/storage/factory'; + +export async function GET() { + return NextResponse.json(getStorageConfig()); +} diff --git a/src/app/api/storage/migrate/route.ts b/src/app/api/storage/migrate/route.ts new file mode 100644 index 0000000..59cf71b --- /dev/null +++ b/src/app/api/storage/migrate/route.ts @@ -0,0 +1,32 @@ +/** + * POST /api/storage/migrate + * Migrates localStorage data to server storage. 
+ * Client sends all its localStorage collections; server merges them. + * Only works when server storage is enabled. + */ + +import { NextRequest, NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; +import type { StorageData } from '@/lib/storage/types'; + +export async function POST(request: NextRequest) { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const body = (await request.json()) as Partial; + + await provider.mergeData(session.username, body); + + return NextResponse.json({ ok: true, migrated: Object.keys(body) }); +} diff --git a/src/app/api/storage/route.ts b/src/app/api/storage/route.ts new file mode 100644 index 0000000..812e255 --- /dev/null +++ b/src/app/api/storage/route.ts @@ -0,0 +1,27 @@ +/** + * GET /api/storage + * Returns all storage data for the authenticated user. + * Only works when server storage is enabled. 
+ */ + +import { NextResponse } from 'next/server'; +import { getSession } from '@/lib/auth'; +import { getStorageProvider } from '@/lib/storage/factory'; + +export async function GET() { + const provider = await getStorageProvider(); + if (!provider) { + return NextResponse.json( + { error: 'Server storage is not enabled' }, + { status: 404 } + ); + } + + const session = await getSession(); + if (!session) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }); + } + + const data = await provider.getAllData(session.username); + return NextResponse.json(data); +} diff --git a/src/components/DataCharts.tsx b/src/components/DataCharts.tsx index 50e260a..4e9d387 100644 --- a/src/components/DataCharts.tsx +++ b/src/components/DataCharts.tsx @@ -55,6 +55,7 @@ import { SelectTrigger, SelectValue, } from '@/components/ui/select'; +import { storage } from '@/lib/storage'; // Chart colors matching CSS variables const CHART_COLORS = [ @@ -320,12 +321,20 @@ export function DataCharts({ result }: DataChartsProps) { const [showSaveDialog, setShowSaveDialog] = useState(false); const [saveName, setSaveName] = useState(''); - // Load saved charts from localStorage + // Load saved charts from storage React.useEffect(() => { - try { - const stored = localStorage.getItem('libredb_saved_charts'); - if (stored) setSavedCharts(JSON.parse(stored)); - } catch { /* ignore */ } + const charts = storage.getSavedCharts(); + if (charts.length > 0) { + setSavedCharts(charts.map(c => ({ + id: c.id, + name: c.name, + chartType: c.chartType as ChartType, + xAxis: c.xAxis, + yAxis: c.yAxis, + aggregation: (c.aggregation || 'none') as AggregationType, + dateGrouping: c.dateGrouping || '', + }))); + } }, []); // Initialize axis selections when analysis changes @@ -411,7 +420,16 @@ export function DataCharts({ result }: DataChartsProps) { }; const updated = [...savedCharts, newChart]; setSavedCharts(updated); - localStorage.setItem('libredb_saved_charts', JSON.stringify(updated)); + 
storage.saveChart({ + id: newChart.id, + name: newChart.name, + chartType: newChart.chartType, + xAxis: newChart.xAxis, + yAxis: newChart.yAxis, + aggregation: newChart.aggregation, + dateGrouping: (newChart.dateGrouping || undefined) as DateGrouping | undefined, + createdAt: new Date(), + }); setShowSaveDialog(false); setSaveName(''); }, [saveName, chartType, xAxis, yAxis, aggregation, dateGrouping, savedCharts]); @@ -429,7 +447,7 @@ export function DataCharts({ result }: DataChartsProps) { const deleteSavedChart = useCallback((id: string) => { const updated = savedCharts.filter(c => c.id !== id); setSavedCharts(updated); - localStorage.setItem('libredb_saved_charts', JSON.stringify(updated)); + storage.deleteChart(id); }, [savedCharts]); const exportChart = useCallback(async (format: 'png' | 'svg') => { diff --git a/src/components/Studio.tsx b/src/components/Studio.tsx index 5649de0..fc7afe7 100644 --- a/src/components/Studio.tsx +++ b/src/components/Studio.tsx @@ -31,6 +31,7 @@ import { useTabManager } from '@/hooks/use-tab-manager'; import { useTransactionControl } from '@/hooks/use-transaction-control'; import { useQueryExecution } from '@/hooks/use-query-execution'; import { useInlineEditing } from '@/hooks/use-inline-editing'; +import { useStorageSync } from '@/hooks/use-storage-sync'; import { storage } from '@/lib/storage'; import { getRandomShowcaseQuery } from '@/lib/showcase-queries'; import { @@ -65,6 +66,9 @@ export default function Studio() { // 1. Auth const { user, isAdmin, handleLogout } = useAuth(); + // 1.5. Storage sync (write-through cache for server mode) + useStorageSync(); + // 2. 
Connection Manager + Provider Metadata const conn = useConnectionManager(); const { metadata } = useProviderMetadata(conn.activeConnection); diff --git a/src/components/admin/tabs/SecurityTab.tsx b/src/components/admin/tabs/SecurityTab.tsx index ac5c04e..d673d02 100644 --- a/src/components/admin/tabs/SecurityTab.tsx +++ b/src/components/admin/tabs/SecurityTab.tsx @@ -19,23 +19,9 @@ import { DEFAULT_THRESHOLDS, type ThresholdConfig, } from '@/lib/monitoring-thresholds'; +import { storage } from '@/lib/storage'; import { toast } from 'sonner'; -const THRESHOLD_STORAGE_KEY = 'libredb_threshold_config'; - -function loadThresholds(): ThresholdConfig[] { - if (typeof window === 'undefined') return DEFAULT_THRESHOLDS; - try { - const stored = localStorage.getItem(THRESHOLD_STORAGE_KEY); - if (stored) return JSON.parse(stored); - } catch { /* ignore */ } - return DEFAULT_THRESHOLDS; -} - -function saveThresholds(thresholds: ThresholdConfig[]) { - localStorage.setItem(THRESHOLD_STORAGE_KEY, JSON.stringify(thresholds)); -} - export function SecurityTab() { return (
@@ -151,7 +137,7 @@ function ThresholdSettings() { const [hasChanges, setHasChanges] = useState(false); useEffect(() => { - setThresholds(loadThresholds()); + setThresholds(storage.getThresholdConfig()); }, []); const updateThreshold = ( @@ -168,14 +154,14 @@ function ThresholdSettings() { }; const handleSave = () => { - saveThresholds(thresholds); + storage.saveThresholdConfig(thresholds); setHasChanges(false); toast.success('Threshold configuration saved'); }; const handleReset = () => { setThresholds(DEFAULT_THRESHOLDS); - saveThresholds(DEFAULT_THRESHOLDS); + storage.saveThresholdConfig(DEFAULT_THRESHOLDS); setHasChanges(false); toast.success('Thresholds reset to defaults'); }; diff --git a/src/components/studio/BottomPanel.tsx b/src/components/studio/BottomPanel.tsx index 9388ef2..0143120 100644 --- a/src/components/studio/BottomPanel.tsx +++ b/src/components/studio/BottomPanel.tsx @@ -26,6 +26,7 @@ import { DropdownMenuItem, DropdownMenuTrigger, } from '@/components/ui/dropdown-menu'; import { Button } from '@/components/ui/button'; +import { storage } from '@/lib/storage'; export type BottomPanelMode = 'results' | 'explain' | 'history' | 'saved' | 'charts' | 'nl2sql' | 'autopilot' | 'pivot' | 'docs' | 'schemadiff' | 'dashboard'; @@ -33,10 +34,8 @@ export type BottomPanelMode = 'results' | 'explain' | 'history' | 'saved' | 'cha function ChartDashboardLazy({ result }: { result: QueryResult | null }) { const [savedCharts, setSavedCharts] = React.useState<{ id: string; name: string; chartType: string; xAxis: string; yAxis: string[] }[]>([]); React.useEffect(() => { - try { - const stored = localStorage.getItem('libredb_saved_charts'); - if (stored) setSavedCharts(JSON.parse(stored)); - } catch { /* ignore */ } + const charts = storage.getSavedCharts(); + if (charts.length > 0) setSavedCharts(charts); }, []); if (savedCharts.length === 0) { diff --git a/src/hooks/use-storage-sync.ts b/src/hooks/use-storage-sync.ts new file mode 100644 index 0000000..a263235 --- 
/dev/null +++ b/src/hooks/use-storage-sync.ts @@ -0,0 +1,235 @@ +'use client'; + +import { useState, useEffect, useRef, useCallback } from 'react'; +import { storage, type StorageConfigResponse, type StorageChangeDetail, type StorageData, STORAGE_COLLECTIONS } from '@/lib/storage'; + +const MIGRATION_FLAG = 'libredb_server_migrated'; +const DEBOUNCE_MS = 500; + +export interface StorageSyncState { + isServerMode: boolean; + isSyncing: boolean; + lastSyncedAt: Date | null; + syncError: string | null; +} + +/** + * Write-through cache sync hook. + * Mounts in Studio.tsx after useAuth. + * + * - Discovers storage mode via GET /api/storage/config + * - In server mode: pulls data on mount, pushes mutations (debounced) + * - Handles first-login migration from localStorage to server + * - Graceful degradation: if server unreachable, localStorage continues + */ +export function useStorageSync(): StorageSyncState { + const [isServerMode, setIsServerMode] = useState(false); + const [isSyncing, setIsSyncing] = useState(false); + const [lastSyncedAt, setLastSyncedAt] = useState(null); + const [syncError, setSyncError] = useState(null); + + const debounceTimerRef = useRef | null>(null); + const pendingCollectionsRef = useRef>(new Set()); + const serverModeRef = useRef(false); + + // ── Push a collection to server (debounced) ── + const pushToServer = useCallback(async (collection: string, data: unknown) => { + try { + const res = await fetch(`/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }); + if (!res.ok) { + const err = await res.json().catch(() => ({})); + throw new Error(err.error || `HTTP ${res.status}`); + } + setLastSyncedAt(new Date()); + setSyncError(null); + } catch (err) { + console.warn(`[StorageSync] Push failed for ${collection}:`, err); + setSyncError(err instanceof Error ? 
err.message : 'Sync failed'); + } + }, []); + + // ── Flush pending collections ── + const flushPending = useCallback(async () => { + const collections = Array.from(pendingCollectionsRef.current); + pendingCollectionsRef.current.clear(); + if (collections.length === 0) return; + + setIsSyncing(true); + try { + await Promise.all( + collections.map((col) => { + const getter = getCollectionData(col); + return pushToServer(col, getter); + }) + ); + } finally { + setIsSyncing(false); + } + }, [pushToServer]); + + // ── Schedule debounced push ── + const schedulePush = useCallback( + (collection: string) => { + pendingCollectionsRef.current.add(collection); + if (debounceTimerRef.current) { + clearTimeout(debounceTimerRef.current); + } + debounceTimerRef.current = setTimeout(() => { + flushPending(); + }, DEBOUNCE_MS); + }, + [flushPending] + ); + + // ── Pull all data from server → localStorage ── + const pullFromServer = useCallback(async () => { + setIsSyncing(true); + try { + const res = await fetch('/api/storage'); + if (!res.ok) return; + const data = (await res.json()) as Partial; + + // Write server data to localStorage (overwrite) + if (data.connections) writeCollectionToLocal('connections', data.connections); + if (data.history) writeCollectionToLocal('history', data.history); + if (data.saved_queries) writeCollectionToLocal('saved_queries', data.saved_queries); + if (data.schema_snapshots) writeCollectionToLocal('schema_snapshots', data.schema_snapshots); + if (data.saved_charts) writeCollectionToLocal('saved_charts', data.saved_charts); + if (data.active_connection_id !== undefined) writeCollectionToLocal('active_connection_id', data.active_connection_id); + if (data.audit_log) writeCollectionToLocal('audit_log', data.audit_log); + if (data.masking_config) writeCollectionToLocal('masking_config', data.masking_config); + if (data.threshold_config) writeCollectionToLocal('threshold_config', data.threshold_config); + + setLastSyncedAt(new Date()); + 
setSyncError(null); + } catch (err) { + console.warn('[StorageSync] Pull failed:', err); + setSyncError(err instanceof Error ? err.message : 'Pull failed'); + } finally { + setIsSyncing(false); + } + }, []); + + // ── Migration: localStorage → server ── + const migrateToServer = useCallback(async () => { + if (typeof window === 'undefined') return; + if (localStorage.getItem(MIGRATION_FLAG)) return; + + setIsSyncing(true); + try { + const allData: Partial = {}; + for (const col of STORAGE_COLLECTIONS) { + const data = getCollectionData(col); + if (data !== null && data !== undefined) { + (allData as Record)[col] = data; + } + } + + if (Object.keys(allData).length === 0) { + localStorage.setItem(MIGRATION_FLAG, new Date().toISOString()); + return; + } + + const res = await fetch('/api/storage/migrate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(allData), + }); + + if (res.ok) { + localStorage.setItem(MIGRATION_FLAG, new Date().toISOString()); + } + } catch (err) { + console.warn('[StorageSync] Migration failed:', err); + } finally { + setIsSyncing(false); + } + }, []); + + // ── Initialize: discover storage mode ── + useEffect(() => { + let cancelled = false; + + async function init() { + try { + const res = await fetch('/api/storage/config'); + if (!res.ok || cancelled) return; + const config = (await res.json()) as StorageConfigResponse; + + if (config.serverMode && !cancelled) { + setIsServerMode(true); + serverModeRef.current = true; + + // Migration first, then pull + await migrateToServer(); + if (!cancelled) { + await pullFromServer(); + } + } + } catch { + // Server unreachable — stay in local mode + } + } + + init(); + return () => { + cancelled = true; + }; + }, [migrateToServer, pullFromServer]); + + // ── Listen for storage mutations ── + useEffect(() => { + if (!isServerMode) return; + + function handleStorageChange(event: Event) { + const detail = (event as CustomEvent).detail; + if 
(detail?.collection) { + schedulePush(detail.collection); + } + } + + window.addEventListener('libredb-storage-change', handleStorageChange); + return () => { + window.removeEventListener('libredb-storage-change', handleStorageChange); + if (debounceTimerRef.current) { + clearTimeout(debounceTimerRef.current); + } + }; + }, [isServerMode, schedulePush]); + + return { isServerMode, isSyncing, lastSyncedAt, syncError }; +} + +// ── Helpers ── + +/** Read a collection's current data from the storage facade */ +function getCollectionData(collection: string): unknown { + switch (collection) { + case 'connections': return storage.getConnections(); + case 'history': return storage.getHistory(); + case 'saved_queries': return storage.getSavedQueries(); + case 'schema_snapshots': return storage.getSchemaSnapshots(); + case 'saved_charts': return storage.getSavedCharts(); + case 'active_connection_id': return storage.getActiveConnectionId(); + case 'audit_log': return storage.getAuditLog(); + case 'masking_config': return storage.getMaskingConfig(); + case 'threshold_config': return storage.getThresholdConfig(); + default: return null; + } +} + +/** Write server data directly to localStorage via storage key */ +function writeCollectionToLocal(collection: string, data: unknown): void { + const key = `libredb_${collection}`; + if (data === null || data === undefined) { + localStorage.removeItem(key); + } else if (typeof data === 'string') { + localStorage.setItem(key, data); + } else { + localStorage.setItem(key, JSON.stringify(data)); + } +} diff --git a/src/lib/audit.ts b/src/lib/audit.ts index 0174b85..99699ee 100644 --- a/src/lib/audit.ts +++ b/src/lib/audit.ts @@ -1,3 +1,5 @@ +import { storage } from '@/lib/storage'; + export type AuditEventType = | 'maintenance' | 'kill_session' @@ -19,7 +21,6 @@ export interface AuditEvent { details?: string; } -const AUDIT_STORAGE_KEY = 'libredb_audit_log'; const MAX_EVENTS = 1000; export class AuditRingBuffer { @@ -93,23 +94,11 @@ 
export function getServerAuditBuffer(): AuditRingBuffer { return _serverBuffer; } -// Client-side localStorage persistence +// Client-side localStorage persistence — delegates to storage module export function loadAuditFromStorage(): AuditEvent[] { - if (typeof window === 'undefined') return []; - try { - const stored = localStorage.getItem(AUDIT_STORAGE_KEY); - return stored ? JSON.parse(stored) : []; - } catch { - return []; - } + return storage.getAuditLog(); } export function saveAuditToStorage(events: AuditEvent[]) { - if (typeof window === 'undefined') return; - try { - const trimmed = events.slice(-MAX_EVENTS); - localStorage.setItem(AUDIT_STORAGE_KEY, JSON.stringify(trimmed)); - } catch { - // Storage full, ignore - } + storage.saveAuditLog(events); } diff --git a/src/lib/data-masking.ts b/src/lib/data-masking.ts index 5e53493..1973ac0 100644 --- a/src/lib/data-masking.ts +++ b/src/lib/data-masking.ts @@ -351,38 +351,16 @@ export function canReveal(role: string | undefined, config: MaskingConfig): bool // ─── Config Persistence ────────────────────────────────────────────────────── +import { storage } from '@/lib/storage'; + export const MASKING_CONFIG_KEY = 'libredb_masking_config'; export function loadMaskingConfig(): MaskingConfig { - if (typeof window === 'undefined') return DEFAULT_MASKING_CONFIG; - try { - const stored = localStorage.getItem(MASKING_CONFIG_KEY); - if (!stored) return DEFAULT_MASKING_CONFIG; - const parsed = JSON.parse(stored) as MaskingConfig; - // Merge with defaults to ensure new builtin patterns are included - const builtinIds = new Set(DEFAULT_MASKING_CONFIG.patterns.filter(p => p.isBuiltin).map(p => p.id)); - const storedIds = new Set(parsed.patterns.map(p => p.id)); - // Add any new builtins that don't exist in stored config - for (const defaultPattern of DEFAULT_MASKING_CONFIG.patterns) { - if (defaultPattern.isBuiltin && !storedIds.has(defaultPattern.id)) { - parsed.patterns.push(defaultPattern); - } - } - // Ensure 
roleSettings exists - if (!parsed.roleSettings) { - parsed.roleSettings = DEFAULT_MASKING_CONFIG.roleSettings; - } - // Remove stale builtin IDs that are no longer in defaults (unlikely but safe) - parsed.patterns = parsed.patterns.filter(p => !p.isBuiltin || builtinIds.has(p.id) || !p.id.startsWith('builtin-')); - return parsed; - } catch { - return DEFAULT_MASKING_CONFIG; - } + return storage.getMaskingConfig(); } export function saveMaskingConfig(config: MaskingConfig): void { - if (typeof window === 'undefined') return; - localStorage.setItem(MASKING_CONFIG_KEY, JSON.stringify(config)); + storage.saveMaskingConfig(config); } // ─── Preview Samples ───────────────────────────────────────────────────────── diff --git a/src/lib/storage.ts b/src/lib/storage.ts deleted file mode 100644 index a5fa01c..0000000 --- a/src/lib/storage.ts +++ /dev/null @@ -1,191 +0,0 @@ -import { DatabaseConnection, QueryHistoryItem, SavedQuery, SchemaSnapshot, SavedChartConfig } from './types'; - -const CONNECTIONS_KEY = 'libredb_connections'; -const HISTORY_KEY = 'libredb_history'; -const SAVED_QUERIES_KEY = 'libredb_saved_queries'; -const SCHEMA_SNAPSHOTS_KEY = 'libredb_schema_snapshots'; -const SAVED_CHARTS_KEY = 'libredb_saved_charts'; -const ACTIVE_CONNECTION_KEY = 'libredb_active_connection_id'; -const MAX_HISTORY_ITEMS = 500; -const MAX_SNAPSHOTS = 50; - -export const storage = { - // Connections - getConnections: (): DatabaseConnection[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(CONNECTIONS_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((conn: DatabaseConnection) => ({ - ...conn, - createdAt: new Date(conn.createdAt) - })); - } catch (e) { - console.error('Failed to parse connections', e); - return []; - } - }, - - saveConnection: (connection: DatabaseConnection) => { - const connections = storage.getConnections(); - const existingIndex = connections.findIndex(c => c.id === connection.id); - - if 
(existingIndex > -1) { - connections[existingIndex] = connection; - } else { - connections.push(connection); - } - - localStorage.setItem(CONNECTIONS_KEY, JSON.stringify(connections)); - }, - - deleteConnection: (id: string) => { - const connections = storage.getConnections(); - const filtered = connections.filter(c => c.id !== id); - localStorage.setItem(CONNECTIONS_KEY, JSON.stringify(filtered)); - }, - - // History - getHistory: (): QueryHistoryItem[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(HISTORY_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((item: QueryHistoryItem) => ({ - ...item, - executedAt: new Date(item.executedAt) - })); - } catch (e) { - console.error('Failed to parse history', e); - return []; - } - }, - - addToHistory: (item: QueryHistoryItem) => { - const history = storage.getHistory(); - const newHistory = [item, ...history].slice(0, MAX_HISTORY_ITEMS); - localStorage.setItem(HISTORY_KEY, JSON.stringify(newHistory)); - }, - - clearHistory: () => { - localStorage.setItem(HISTORY_KEY, JSON.stringify([])); - }, - - // Saved Queries - getSavedQueries: (): SavedQuery[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SAVED_QUERIES_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((q: SavedQuery) => ({ - ...q, - createdAt: new Date(q.createdAt), - updatedAt: new Date(q.updatedAt) - })); - } catch (e) { - console.error('Failed to parse saved queries', e); - return []; - } - }, - - saveQuery: (query: SavedQuery) => { - const queries = storage.getSavedQueries(); - const existingIndex = queries.findIndex(q => q.id === query.id); - - if (existingIndex > -1) { - queries[existingIndex] = { ...query, updatedAt: new Date() }; - } else { - queries.push({ ...query, createdAt: new Date(), updatedAt: new Date() }); - } - - localStorage.setItem(SAVED_QUERIES_KEY, JSON.stringify(queries)); - }, - - deleteSavedQuery: (id: string) 
=> { - const queries = storage.getSavedQueries(); - const filtered = queries.filter(q => q.id !== id); - localStorage.setItem(SAVED_QUERIES_KEY, JSON.stringify(filtered)); - }, - - // Schema Snapshots - getSchemaSnapshots: (connectionId?: string): SchemaSnapshot[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SCHEMA_SNAPSHOTS_KEY); - if (!stored) return []; - try { - const snapshots: SchemaSnapshot[] = JSON.parse(stored).map((s: SchemaSnapshot) => ({ - ...s, - createdAt: new Date(s.createdAt), - })); - if (connectionId) { - return snapshots.filter(s => s.connectionId === connectionId); - } - return snapshots; - } catch (e) { - console.error('Failed to parse schema snapshots', e); - return []; - } - }, - - saveSchemaSnapshot: (snapshot: SchemaSnapshot) => { - const snapshots = storage.getSchemaSnapshots(); - snapshots.push({ ...snapshot, createdAt: new Date() }); - // Keep only the latest MAX_SNAPSHOTS - const trimmed = snapshots.slice(-MAX_SNAPSHOTS); - localStorage.setItem(SCHEMA_SNAPSHOTS_KEY, JSON.stringify(trimmed)); - }, - - deleteSchemaSnapshot: (id: string) => { - const snapshots = storage.getSchemaSnapshots(); - const filtered = snapshots.filter(s => s.id !== id); - localStorage.setItem(SCHEMA_SNAPSHOTS_KEY, JSON.stringify(filtered)); - }, - - // Saved Charts - getSavedCharts: (): SavedChartConfig[] => { - if (typeof window === 'undefined') return []; - const stored = localStorage.getItem(SAVED_CHARTS_KEY); - if (!stored) return []; - try { - return JSON.parse(stored).map((c: SavedChartConfig) => ({ - ...c, - createdAt: new Date(c.createdAt), - })); - } catch (e) { - console.error('Failed to parse saved charts', e); - return []; - } - }, - - saveChart: (chart: SavedChartConfig) => { - const charts = storage.getSavedCharts(); - const existingIndex = charts.findIndex(c => c.id === chart.id); - if (existingIndex > -1) { - charts[existingIndex] = chart; - } else { - charts.push({ ...chart, createdAt: new Date() }); - 
} - localStorage.setItem(SAVED_CHARTS_KEY, JSON.stringify(charts)); - }, - - deleteChart: (id: string) => { - const charts = storage.getSavedCharts(); - const filtered = charts.filter(c => c.id !== id); - localStorage.setItem(SAVED_CHARTS_KEY, JSON.stringify(filtered)); - }, - - // Active Connection ID - getActiveConnectionId: (): string | null => { - if (typeof window === 'undefined') return null; - return localStorage.getItem(ACTIVE_CONNECTION_KEY); - }, - - setActiveConnectionId: (id: string | null) => { - if (typeof window === 'undefined') return; - if (id) { - localStorage.setItem(ACTIVE_CONNECTION_KEY, id); - } else { - localStorage.removeItem(ACTIVE_CONNECTION_KEY); - } - }, -}; diff --git a/src/lib/storage/factory.ts b/src/lib/storage/factory.ts new file mode 100644 index 0000000..1e2495c --- /dev/null +++ b/src/lib/storage/factory.ts @@ -0,0 +1,84 @@ +/** + * Storage Provider Factory + * Creates the appropriate server storage provider based on STORAGE_PROVIDER env var. + * Uses singleton pattern — one provider instance per process. + */ + +import type { ServerStorageProvider, StorageConfigResponse } from './types'; + +let _provider: ServerStorageProvider | null = null; +let _initialized = false; + +export type StorageProviderType = 'local' | 'sqlite' | 'postgres'; + +/** + * Get the configured storage provider type from environment. + * Returns 'local' if not set or invalid. + */ +export function getStorageProviderType(): StorageProviderType { + const env = process.env.STORAGE_PROVIDER?.toLowerCase(); + if (env === 'sqlite' || env === 'postgres') return env; + return 'local'; +} + +/** + * Check if server-side storage is enabled. + */ +export function isServerStorageEnabled(): boolean { + return getStorageProviderType() !== 'local'; +} + +/** + * Get the storage configuration for the /api/storage/config endpoint. 
+ */ +export function getStorageConfig(): StorageConfigResponse { + const provider = getStorageProviderType(); + return { + provider, + serverMode: provider !== 'local', + }; +} + +/** + * Get or create the singleton server storage provider. + * Returns null if STORAGE_PROVIDER is 'local' or not set. + * The provider is automatically initialized on first call. + */ +export async function getStorageProvider(): Promise { + const providerType = getStorageProviderType(); + + if (providerType === 'local') return null; + + if (_provider && _initialized) return _provider; + + switch (providerType) { + case 'sqlite': { + const { SQLiteStorageProvider } = await import('./providers/sqlite'); + _provider = new SQLiteStorageProvider(); + break; + } + case 'postgres': { + const { PostgresStorageProvider } = await import('./providers/postgres'); + _provider = new PostgresStorageProvider(); + break; + } + } + + if (_provider && !_initialized) { + await _provider.initialize(); + _initialized = true; + } + + return _provider; +} + +/** + * Close and reset the singleton provider. Used for testing/cleanup. + */ +export async function closeStorageProvider(): Promise { + if (_provider) { + await _provider.close(); + _provider = null; + _initialized = false; + } +} diff --git a/src/lib/storage/index.ts b/src/lib/storage/index.ts new file mode 100644 index 0000000..f155bc9 --- /dev/null +++ b/src/lib/storage/index.ts @@ -0,0 +1,14 @@ +/** + * Storage module — barrel export. + * Import path `@/lib/storage` is preserved for all consumers. 
+ */ + +export { storage } from './storage-facade'; +export type { + StorageData, + StorageCollection, + ServerStorageProvider, + StorageConfigResponse, + StorageChangeDetail, +} from './types'; +export { STORAGE_COLLECTIONS } from './types'; diff --git a/src/lib/storage/local-storage.ts b/src/lib/storage/local-storage.ts new file mode 100644 index 0000000..0b6d7e0 --- /dev/null +++ b/src/lib/storage/local-storage.ts @@ -0,0 +1,76 @@ +/** + * Pure localStorage CRUD operations. + * All reads/writes go through these functions. + * No event dispatching — that's the facade's responsibility. + */ + +const KEY_PREFIX = 'libredb_'; + +/** Map collection names to localStorage keys */ +const COLLECTION_KEYS: Record = { + connections: `${KEY_PREFIX}connections`, + history: `${KEY_PREFIX}history`, + saved_queries: `${KEY_PREFIX}saved_queries`, + schema_snapshots: `${KEY_PREFIX}schema_snapshots`, + saved_charts: `${KEY_PREFIX}saved_charts`, + active_connection_id: `${KEY_PREFIX}active_connection_id`, + audit_log: `${KEY_PREFIX}audit_log`, + masking_config: `${KEY_PREFIX}masking_config`, + threshold_config: `${KEY_PREFIX}threshold_config`, +}; + +function isClient(): boolean { + return typeof window !== 'undefined'; +} + +export function getKey(collection: string): string { + return COLLECTION_KEYS[collection] || `${KEY_PREFIX}${collection}`; +} + +/** + * Read raw JSON from localStorage. + * Returns null if not found or parse fails. + */ +export function readJSON(collection: string): T | null { + if (!isClient()) return null; + try { + const key = getKey(collection); + const raw = localStorage.getItem(key); + if (raw === null) return null; + return JSON.parse(raw) as T; + } catch { + return null; + } +} + +/** + * Read raw string from localStorage. + */ +export function readString(collection: string): string | null { + if (!isClient()) return null; + return localStorage.getItem(getKey(collection)); +} + +/** + * Write JSON to localStorage. 
+ */ +export function writeJSON(collection: string, data: unknown): void { + if (!isClient()) return; + localStorage.setItem(getKey(collection), JSON.stringify(data)); +} + +/** + * Write raw string to localStorage. + */ +export function writeString(collection: string, value: string): void { + if (!isClient()) return; + localStorage.setItem(getKey(collection), value); +} + +/** + * Remove a key from localStorage. + */ +export function remove(collection: string): void { + if (!isClient()) return; + localStorage.removeItem(getKey(collection)); +} diff --git a/src/lib/storage/providers/postgres.ts b/src/lib/storage/providers/postgres.ts new file mode 100644 index 0000000..166f0e4 --- /dev/null +++ b/src/lib/storage/providers/postgres.ts @@ -0,0 +1,153 @@ +/** + * PostgreSQL Server Storage Provider + * Uses the existing `pg` package (already a project dependency). + */ + +import type { ServerStorageProvider, StorageCollection, StorageData } from '../types'; +import { STORAGE_COLLECTIONS } from '../types'; + +let Pool: typeof import('pg').Pool; + +export class PostgresStorageProvider implements ServerStorageProvider { + private pool: InstanceType | null = null; + private connectionString: string; + + constructor(connectionString?: string) { + this.connectionString = + connectionString || process.env.STORAGE_POSTGRES_URL || ''; + } + + async initialize(): Promise { + if (!this.connectionString) { + throw new Error( + 'STORAGE_POSTGRES_URL is required when STORAGE_PROVIDER=postgres' + ); + } + + // Dynamic import to avoid requiring pg when not needed + if (!Pool) { + const pg = await import('pg'); + Pool = pg.Pool; + } + + this.pool = new Pool({ + connectionString: this.connectionString, + max: 5, + idleTimeoutMillis: 30000, + }); + + // Create table + await this.pool.query(` + CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection TEXT NOT NULL, + data TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, 
collection)
+      )
+    `);
+  }
+
+  async getAllData(userId: string): Promise<Partial<StorageData>> {
+    this.ensurePool();
+    const { rows } = await this.pool!.query(
+      'SELECT collection, data FROM user_storage WHERE user_id = $1',
+      [userId]
+    );
+
+    const result: Partial<StorageData> = {};
+    for (const row of rows) {
+      try {
+        (result as Record<string, unknown>)[row.collection] = JSON.parse(
+          row.data
+        );
+      } catch {
+        // Skip corrupted data
+      }
+    }
+    return result;
+  }
+
+  async getCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K
+  ): Promise<StorageData[K] | null> {
+    this.ensurePool();
+    const { rows } = await this.pool!.query(
+      'SELECT data FROM user_storage WHERE user_id = $1 AND collection = $2',
+      [userId, collection]
+    );
+    if (rows.length === 0) return null;
+    try {
+      return JSON.parse(rows[0].data) as StorageData[K];
+    } catch {
+      return null;
+    }
+  }
+
+  async setCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K,
+    data: StorageData[K]
+  ): Promise<void> {
+    this.ensurePool();
+    await this.pool!.query(
+      `INSERT INTO user_storage (user_id, collection, data, updated_at)
+       VALUES ($1, $2, $3, NOW())
+       ON CONFLICT (user_id, collection)
+       DO UPDATE SET data = EXCLUDED.data, updated_at = NOW()`,
+      [userId, collection, JSON.stringify(data)]
+    );
+  }
+
+  async mergeData(userId: string, data: Partial<StorageData>): Promise<void> {
+    this.ensurePool();
+    const client = await this.pool!.connect();
+    try {
+      await client.query('BEGIN');
+      for (const collection of STORAGE_COLLECTIONS) {
+        const collectionData = (data as Record<string, unknown>)[collection];
+        if (collectionData !== undefined) {
+          await client.query(
+            `INSERT INTO user_storage (user_id, collection, data, updated_at)
+             VALUES ($1, $2, $3, NOW())
+             ON CONFLICT (user_id, collection)
+             DO UPDATE SET data = EXCLUDED.data, updated_at = NOW()`,
+            [userId, collection, JSON.stringify(collectionData)]
+          );
+        }
+      }
+      await client.query('COMMIT');
+    } catch (err) {
+      await client.query('ROLLBACK');
+      throw err;
+    } finally {
+      client.release();
+    }
+  }
+
+  async isHealthy(): Promise<boolean> {
+    try {
+      this.ensurePool();
+      const { rows } =
await this.pool!.query('SELECT 1 as ok'); + return rows[0]?.ok === 1; + } catch { + return false; + } + } + + async close(): Promise { + if (this.pool) { + await this.pool.end(); + this.pool = null; + } + } + + private ensurePool(): void { + if (!this.pool) { + throw new Error( + 'PostgreSQL storage not initialized. Call initialize() first.' + ); + } + } +} diff --git a/src/lib/storage/providers/sqlite.ts b/src/lib/storage/providers/sqlite.ts new file mode 100644 index 0000000..af7ee85 --- /dev/null +++ b/src/lib/storage/providers/sqlite.ts @@ -0,0 +1,146 @@ +/** + * SQLite Server Storage Provider + * Uses better-sqlite3 (Node.js compatible, works in production runner). + * WAL mode enabled for concurrent read performance. + */ + +import type { ServerStorageProvider, StorageCollection, StorageData } from '../types'; +import { STORAGE_COLLECTIONS } from '../types'; +import type BetterSqlite3 from 'better-sqlite3'; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +let Database: any; + +export class SQLiteStorageProvider implements ServerStorageProvider { + private db: BetterSqlite3.Database | null = null; + private dbPath: string; + + constructor(dbPath?: string) { + this.dbPath = dbPath || process.env.STORAGE_SQLITE_PATH || './data/libredb-storage.db'; + } + + async initialize(): Promise { + // Dynamic import to avoid requiring better-sqlite3 when not needed + if (!Database) { + const mod = await import('better-sqlite3'); + Database = mod.default; + } + + // Ensure directory exists + const path = await import('path'); + const fs = await import('fs'); + const dir = path.dirname(this.dbPath); + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } + + this.db = new Database(this.dbPath) as BetterSqlite3.Database; + + // Enable WAL mode for better concurrent read performance + this.db!.pragma('journal_mode = WAL'); + + // Create table + this.db!.exec(` + CREATE TABLE IF NOT EXISTS user_storage ( + user_id TEXT NOT NULL, + collection 
TEXT NOT NULL,
+        data TEXT NOT NULL,
+        updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+        PRIMARY KEY (user_id, collection)
+      )
+    `);
+  }
+
+  async getAllData(userId: string): Promise<Partial<StorageData>> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(
+      'SELECT collection, data FROM user_storage WHERE user_id = ?'
+    );
+    const rows = stmt.all(userId) as { collection: string; data: string }[];
+
+    const result: Partial<StorageData> = {};
+    for (const row of rows) {
+      try {
+        (result as Record<string, unknown>)[row.collection] = JSON.parse(row.data);
+      } catch {
+        // Skip corrupted data
+      }
+    }
+    return result;
+  }
+
+  async getCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K
+  ): Promise<StorageData[K] | null> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(
+      'SELECT data FROM user_storage WHERE user_id = ? AND collection = ?'
+    );
+    const row = stmt.get(userId, collection) as { data: string } | undefined;
+    if (!row) return null;
+    try {
+      return JSON.parse(row.data) as StorageData[K];
+    } catch {
+      return null;
+    }
+  }
+
+  async setCollection<K extends StorageCollection>(
+    userId: string,
+    collection: K,
+    data: StorageData[K]
+  ): Promise<void> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(`
+      INSERT INTO user_storage (user_id, collection, data, updated_at)
+      VALUES (?, ?, ?, datetime('now'))
+      ON CONFLICT (user_id, collection)
+      DO UPDATE SET data = excluded.data, updated_at = excluded.updated_at
+    `);
+    stmt.run(userId, collection, JSON.stringify(data));
+  }
+
+  async mergeData(userId: string, data: Partial<StorageData>): Promise<void> {
+    this.ensureDb();
+    const stmt = this.db!.prepare(`
+      INSERT INTO user_storage (user_id, collection, data, updated_at)
+      VALUES (?, ?, ?, datetime('now'))
+      ON CONFLICT (user_id, collection)
+      DO UPDATE SET data = excluded.data, updated_at = excluded.updated_at
+    `);
+
+    const tx = this.db!.transaction(() => {
+      for (const collection of STORAGE_COLLECTIONS) {
+        const collectionData = (data as Record<string, unknown>)[collection];
+        if (collectionData !== undefined) {
+          stmt.run(userId, collection, JSON.stringify(collectionData));
+        }
+      }
+    });
+
tx(); + } + + async isHealthy(): Promise { + try { + this.ensureDb(); + const result = this.db!.prepare('SELECT 1 as ok').get() as { ok: number }; + return result?.ok === 1; + } catch { + return false; + } + } + + async close(): Promise { + if (this.db) { + this.db.close(); + this.db = null; + } + } + + private ensureDb(): void { + if (!this.db) { + throw new Error('SQLite storage not initialized. Call initialize() first.'); + } + } +} diff --git a/src/lib/storage/storage-facade.ts b/src/lib/storage/storage-facade.ts new file mode 100644 index 0000000..f18d22b --- /dev/null +++ b/src/lib/storage/storage-facade.ts @@ -0,0 +1,272 @@ +/** + * Storage Facade — public API for all storage operations. + * Maintains the same sync interface as the original storage.ts. + * Dispatches CustomEvent on every mutation for the sync hook. + */ + +import { + DatabaseConnection, + QueryHistoryItem, + SavedQuery, + SchemaSnapshot, + SavedChartConfig, +} from '../types'; +import { type AuditEvent } from '../audit'; +import { DEFAULT_MASKING_CONFIG, type MaskingConfig } from '../data-masking'; +import { DEFAULT_THRESHOLDS, type ThresholdConfig } from '../monitoring-thresholds'; +import { readJSON, writeJSON, readString, writeString, remove } from './local-storage'; +import type { StorageCollection } from './types'; + +const MAX_HISTORY_ITEMS = 500; +const MAX_SNAPSHOTS = 50; +const MAX_AUDIT_EVENTS = 1000; + +/** Dispatch a custom event to notify the sync hook of a mutation */ +function dispatchChange(collection: StorageCollection, data: unknown): void { + if (typeof window === 'undefined') return; + window.dispatchEvent( + new CustomEvent('libredb-storage-change', { + detail: { collection, data }, + }) + ); +} + +/** Revive Date fields from JSON-parsed objects */ +function reviveDates(items: T[], ...dateFields: string[]): T[] { + return items.map((item) => { + const revived = { ...item } as Record; + for (const field of dateFields) { + if (revived[field]) { + revived[field] = new 
Date(revived[field] as string); + } + } + return revived as unknown as T; + }); +} + +export const storage = { + // ═══════════════════════════════════════════════════════════════════════════ + // Connections + // ═══════════════════════════════════════════════════════════════════════════ + + getConnections: (): DatabaseConnection[] => { + const data = readJSON('connections'); + if (!data) return []; + return reviveDates(data, 'createdAt'); + }, + + saveConnection: (connection: DatabaseConnection) => { + const connections = storage.getConnections(); + const existingIndex = connections.findIndex((c) => c.id === connection.id); + + if (existingIndex > -1) { + connections[existingIndex] = connection; + } else { + connections.push(connection); + } + + writeJSON('connections', connections); + dispatchChange('connections', connections); + }, + + deleteConnection: (id: string) => { + const connections = storage.getConnections(); + const filtered = connections.filter((c) => c.id !== id); + writeJSON('connections', filtered); + dispatchChange('connections', filtered); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // History + // ═══════════════════════════════════════════════════════════════════════════ + + getHistory: (): QueryHistoryItem[] => { + const data = readJSON('history'); + if (!data) return []; + return reviveDates(data, 'executedAt'); + }, + + addToHistory: (item: QueryHistoryItem) => { + const history = storage.getHistory(); + const newHistory = [item, ...history].slice(0, MAX_HISTORY_ITEMS); + writeJSON('history', newHistory); + dispatchChange('history', newHistory); + }, + + clearHistory: () => { + writeJSON('history', []); + dispatchChange('history', []); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Saved Queries + // ═══════════════════════════════════════════════════════════════════════════ + + getSavedQueries: (): SavedQuery[] => { + const data = 
readJSON('saved_queries'); + if (!data) return []; + return reviveDates(data, 'createdAt', 'updatedAt'); + }, + + saveQuery: (query: SavedQuery) => { + const queries = storage.getSavedQueries(); + const existingIndex = queries.findIndex((q) => q.id === query.id); + + if (existingIndex > -1) { + queries[existingIndex] = { ...query, updatedAt: new Date() }; + } else { + queries.push({ ...query, createdAt: new Date(), updatedAt: new Date() }); + } + + writeJSON('saved_queries', queries); + dispatchChange('saved_queries', queries); + }, + + deleteSavedQuery: (id: string) => { + const queries = storage.getSavedQueries(); + const filtered = queries.filter((q) => q.id !== id); + writeJSON('saved_queries', filtered); + dispatchChange('saved_queries', filtered); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Schema Snapshots + // ═══════════════════════════════════════════════════════════════════════════ + + getSchemaSnapshots: (connectionId?: string): SchemaSnapshot[] => { + const data = readJSON('schema_snapshots'); + if (!data) return []; + const snapshots = reviveDates(data, 'createdAt'); + if (connectionId) { + return snapshots.filter((s) => s.connectionId === connectionId); + } + return snapshots; + }, + + saveSchemaSnapshot: (snapshot: SchemaSnapshot) => { + const snapshots = storage.getSchemaSnapshots(); + snapshots.push({ ...snapshot, createdAt: new Date() }); + const trimmed = snapshots.slice(-MAX_SNAPSHOTS); + writeJSON('schema_snapshots', trimmed); + dispatchChange('schema_snapshots', trimmed); + }, + + deleteSchemaSnapshot: (id: string) => { + const snapshots = storage.getSchemaSnapshots(); + const filtered = snapshots.filter((s) => s.id !== id); + writeJSON('schema_snapshots', filtered); + dispatchChange('schema_snapshots', filtered); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Saved Charts + // 
═══════════════════════════════════════════════════════════════════════════ + + getSavedCharts: (): SavedChartConfig[] => { + const data = readJSON('saved_charts'); + if (!data) return []; + return reviveDates(data, 'createdAt'); + }, + + saveChart: (chart: SavedChartConfig) => { + const charts = storage.getSavedCharts(); + const existingIndex = charts.findIndex((c) => c.id === chart.id); + if (existingIndex > -1) { + charts[existingIndex] = chart; + } else { + charts.push({ ...chart, createdAt: new Date() }); + } + writeJSON('saved_charts', charts); + dispatchChange('saved_charts', charts); + }, + + deleteChart: (id: string) => { + const charts = storage.getSavedCharts(); + const filtered = charts.filter((c) => c.id !== id); + writeJSON('saved_charts', filtered); + dispatchChange('saved_charts', filtered); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Active Connection ID + // ═══════════════════════════════════════════════════════════════════════════ + + getActiveConnectionId: (): string | null => { + return readString('active_connection_id'); + }, + + setActiveConnectionId: (id: string | null) => { + if (typeof window === 'undefined') return; + if (id) { + writeString('active_connection_id', id); + } else { + remove('active_connection_id'); + } + dispatchChange('active_connection_id', id); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Audit Log (consolidated from audit.ts) + // ═══════════════════════════════════════════════════════════════════════════ + + getAuditLog: (): AuditEvent[] => { + const data = readJSON('audit_log'); + return data ?? 
[]; + }, + + saveAuditLog: (events: AuditEvent[]) => { + const trimmed = events.slice(-MAX_AUDIT_EVENTS); + writeJSON('audit_log', trimmed); + dispatchChange('audit_log', trimmed); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Masking Config (consolidated from data-masking.ts) + // ═══════════════════════════════════════════════════════════════════════════ + + getMaskingConfig: (): MaskingConfig => { + const data = readJSON('masking_config'); + if (!data) return DEFAULT_MASKING_CONFIG; + + // Merge with defaults to ensure new builtin patterns are included + const builtinIds = new Set( + DEFAULT_MASKING_CONFIG.patterns.filter((p) => p.isBuiltin).map((p) => p.id) + ); + const storedIds = new Set(data.patterns.map((p) => p.id)); + + for (const defaultPattern of DEFAULT_MASKING_CONFIG.patterns) { + if (defaultPattern.isBuiltin && !storedIds.has(defaultPattern.id)) { + data.patterns.push(defaultPattern); + } + } + + if (!data.roleSettings) { + data.roleSettings = DEFAULT_MASKING_CONFIG.roleSettings; + } + + data.patterns = data.patterns.filter( + (p) => !p.isBuiltin || builtinIds.has(p.id) || !p.id.startsWith('builtin-') + ); + + return data; + }, + + saveMaskingConfig: (config: MaskingConfig) => { + writeJSON('masking_config', config); + dispatchChange('masking_config', config); + }, + + // ═══════════════════════════════════════════════════════════════════════════ + // Threshold Config (consolidated from SecurityTab.tsx) + // ═══════════════════════════════════════════════════════════════════════════ + + getThresholdConfig: (): ThresholdConfig[] => { + const data = readJSON('threshold_config'); + return data ?? 
DEFAULT_THRESHOLDS; + }, + + saveThresholdConfig: (thresholds: ThresholdConfig[]) => { + writeJSON('threshold_config', thresholds); + dispatchChange('threshold_config', thresholds); + }, +}; diff --git a/src/lib/storage/types.ts b/src/lib/storage/types.ts new file mode 100644 index 0000000..9598234 --- /dev/null +++ b/src/lib/storage/types.ts @@ -0,0 +1,75 @@ +import type { + DatabaseConnection, + QueryHistoryItem, + SavedQuery, + SchemaSnapshot, + SavedChartConfig, +} from '../types'; +import type { AuditEvent } from '../audit'; +import type { MaskingConfig } from '../data-masking'; +import type { ThresholdConfig } from '../monitoring-thresholds'; + +/** + * All persistable collections and their data types. + * Maps 1:1 with localStorage keys (minus the `libredb_` prefix). + */ +export interface StorageData { + connections: DatabaseConnection[]; + history: QueryHistoryItem[]; + saved_queries: SavedQuery[]; + schema_snapshots: SchemaSnapshot[]; + saved_charts: SavedChartConfig[]; + active_connection_id: string | null; + audit_log: AuditEvent[]; + masking_config: MaskingConfig; + threshold_config: ThresholdConfig[]; +} + +/** Collection names that can be synced to server storage */ +export type StorageCollection = keyof StorageData; + +/** All persistable collection names */ +export const STORAGE_COLLECTIONS: StorageCollection[] = [ + 'connections', + 'history', + 'saved_queries', + 'schema_snapshots', + 'saved_charts', + 'active_connection_id', + 'audit_log', + 'masking_config', + 'threshold_config', +]; + +/** + * Server-side storage provider interface. + * Implements the Strategy Pattern — SQLite and PostgreSQL both implement this. 
+ */
+export interface ServerStorageProvider {
+  /** Create tables if they don't exist */
+  initialize(): Promise<void>;
+  /** Get all collections for a user */
+  getAllData(userId: string): Promise<Partial<StorageData>>;
+  /** Get a single collection for a user */
+  getCollection<K extends StorageCollection>(userId: string, collection: K): Promise<StorageData[K] | null>;
+  /** Set a single collection for a user */
+  setCollection<K extends StorageCollection>(userId: string, collection: K, data: StorageData[K]): Promise<void>;
+  /** Merge multiple collections (used for migration) */
+  mergeData(userId: string, data: Partial<StorageData>): Promise<void>;
+  /** Health check */
+  isHealthy(): Promise<boolean>;
+  /** Cleanup resources */
+  close(): Promise<void>;
+}
+
+/** Storage config returned by /api/storage/config */
+export interface StorageConfigResponse {
+  provider: 'local' | 'sqlite' | 'postgres';
+  serverMode: boolean;
+}
+
+/** Event dispatched on storage mutations */
+export interface StorageChangeDetail {
+  collection: StorageCollection;
+  data: unknown;
+}
diff --git a/src/proxy.ts b/src/proxy.ts
index 3803718..ae4d11e 100644
--- a/src/proxy.ts
+++ b/src/proxy.ts
@@ -59,7 +59,9 @@ export async function proxy(request: NextRequest) {
   // Health check endpoint for load balancers (Render, K8s, etc.)
pathname === '/api/db/health' || // Demo connection endpoint (public for initial load) - pathname === '/api/demo-connection' + pathname === '/api/demo-connection' || + // Storage config endpoint (public, returns only mode info) + pathname === '/api/storage/config' ) { return NextResponse.next(); } @@ -90,10 +92,11 @@ export const config = { * - api/auth (auth endpoints) * - api/db/health (health check for load balancers) * - api/demo-connection (demo database connection - public) + * - api/storage/config (storage mode discovery - public) * - _next/static (static files) * - _next/image (image optimization files) * - favicon.ico (favicon file) */ - '/((?!api/auth|api/db/health|api/demo-connection|_next/static|_next/image|favicon.ico).*)', + '/((?!api/auth|api/db/health|api/demo-connection|api/storage/config|_next/static|_next/image|favicon.ico).*)', ], }; diff --git a/tests/api/storage/config.test.ts b/tests/api/storage/config.test.ts new file mode 100644 index 0000000..9a96365 --- /dev/null +++ b/tests/api/storage/config.test.ts @@ -0,0 +1,42 @@ +import { describe, test, expect, beforeEach, afterEach } from 'bun:test'; +import { GET } from '@/app/api/storage/config/route'; + +describe('GET /api/storage/config', () => { + const originalEnv = process.env.STORAGE_PROVIDER; + + beforeEach(() => { + delete process.env.STORAGE_PROVIDER; + }); + + afterEach(() => { + if (originalEnv === undefined) { + delete process.env.STORAGE_PROVIDER; + } else { + process.env.STORAGE_PROVIDER = originalEnv; + } + }); + + test('returns local config by default', async () => { + const res = await GET(); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.provider).toBe('local'); + expect(json.serverMode).toBe(false); + }); + + test('returns sqlite config when STORAGE_PROVIDER=sqlite', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const res = await GET(); + const json = await res.json(); + expect(json.provider).toBe('sqlite'); + 
expect(json.serverMode).toBe(true); + }); + + test('returns postgres config when STORAGE_PROVIDER=postgres', async () => { + process.env.STORAGE_PROVIDER = 'postgres'; + const res = await GET(); + const json = await res.json(); + expect(json.provider).toBe('postgres'); + expect(json.serverMode).toBe(true); + }); +}); diff --git a/tests/api/storage/storage-routes.test.ts b/tests/api/storage/storage-routes.test.ts new file mode 100644 index 0000000..8f05d99 --- /dev/null +++ b/tests/api/storage/storage-routes.test.ts @@ -0,0 +1,267 @@ +import { describe, test, expect, mock, beforeEach } from 'bun:test'; +import { NextRequest } from 'next/server'; + +// ── Mock auth ──────────────────────────────────────────────────────────────── + +let mockSession: { username: string; role: string } | null = { username: 'admin@test.com', role: 'admin' }; + +mock.module('@/lib/auth', () => ({ + getSession: async () => mockSession, +})); + +// ── Mock storage factory ───────────────────────────────────────────────────── + +const mockProvider = { + getAllData: mock(async () => ({ + connections: [{ id: 'c1' }], + })), + getCollection: mock(async () => [{ id: 'c1' }]), + setCollection: mock(async () => {}), + mergeData: mock(async () => {}), +}; + +let providerEnabled = true; + +mock.module('@/lib/storage/factory', () => ({ + getStorageProvider: async () => (providerEnabled ? 
mockProvider : null), +})); + +mock.module('@/lib/storage/types', () => ({ + STORAGE_COLLECTIONS: [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ], +})); + +// ── Import routes ──────────────────────────────────────────────────────────── + +import { GET } from '@/app/api/storage/route'; +import { PUT } from '@/app/api/storage/[collection]/route'; +import { POST } from '@/app/api/storage/migrate/route'; + +// ── Tests ──────────────────────────────────────────────────────────────────── + +describe('GET /api/storage', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.getAllData.mockClear(); + }); + + test('returns 404 when storage not enabled', async () => { + providerEnabled = false; + const res = await GET(); + expect(res.status).toBe(404); + }); + + test('returns 401 when not authenticated', async () => { + mockSession = null; + const res = await GET(); + expect(res.status).toBe(401); + }); + + test('returns user data on success', async () => { + const res = await GET(); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.connections).toEqual([{ id: 'c1' }]); + expect(mockProvider.getAllData).toHaveBeenCalledWith('admin@test.com'); + }); +}); + +describe('PUT /api/storage/[collection]', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.setCollection.mockClear(); + }); + + function makeRequest(collection: string, data: unknown) { + return PUT( + new NextRequest(`http://localhost/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }), + { params: Promise.resolve({ collection }) } + ); + } + + test('returns 404 when storage not enabled', async () => { + providerEnabled = false; + 
const res = await makeRequest('connections', []); + expect(res.status).toBe(404); + }); + + test('returns 401 when not authenticated', async () => { + mockSession = null; + const res = await makeRequest('connections', []); + expect(res.status).toBe(401); + }); + + test('returns 400 for invalid collection', async () => { + const res = await makeRequest('invalid_collection', []); + expect(res.status).toBe(400); + }); + + test('updates collection on success', async () => { + const data = [{ id: 'c1', name: 'New DB' }]; + const res = await makeRequest('connections', data); + expect(res.status).toBe(200); + expect(mockProvider.setCollection).toHaveBeenCalledWith( + 'admin@test.com', + 'connections', + data + ); + }); +}); + +describe('POST /api/storage/migrate', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.mergeData.mockClear(); + }); + + function makeMigrateRequest(data: Record) { + return POST( + new NextRequest('http://localhost/api/storage/migrate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(data), + }) + ); + } + + test('returns 404 when storage not enabled', async () => { + providerEnabled = false; + const res = await makeMigrateRequest({}); + expect(res.status).toBe(404); + }); + + test('returns 401 when not authenticated', async () => { + mockSession = null; + const res = await makeMigrateRequest({}); + expect(res.status).toBe(401); + }); + + test('merges data on success', async () => { + const data = { connections: [{ id: 'c1' }], history: [] }; + const res = await makeMigrateRequest(data); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.ok).toBe(true); + expect(json.migrated).toContain('connections'); + expect(json.migrated).toContain('history'); + expect(mockProvider.mergeData).toHaveBeenCalledWith('admin@test.com', data); + }); + + test('returns empty migrated array for empty payload', async 
() => { + const res = await makeMigrateRequest({}); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.ok).toBe(true); + expect(json.migrated).toEqual([]); + }); +}); + +// ── Error propagation from provider ──────────────────────────────────────── + +describe('API routes: provider error propagation', () => { + beforeEach(() => { + mockSession = { username: 'admin@test.com', role: 'admin' }; + providerEnabled = true; + mockProvider.getAllData.mockClear(); + mockProvider.setCollection.mockClear(); + mockProvider.mergeData.mockClear(); + }); + + test('GET /api/storage propagates provider error', async () => { + mockProvider.getAllData.mockRejectedValueOnce(new Error('DB connection lost')); + // Route has no try/catch — error propagates (Next.js catches in production) + await expect(GET()).rejects.toThrow('DB connection lost'); + }); + + test('PUT collection response includes ok:true on success', async () => { + const res = await PUT( + new NextRequest('http://localhost/api/storage/connections', { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [{ id: 'c1' }] }), + }), + { params: Promise.resolve({ collection: 'connections' }) } + ); + expect(res.status).toBe(200); + const json = await res.json(); + expect(json.ok).toBe(true); + }); + + test('PUT uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + const data = [{ id: 'c1' }]; + await PUT( + new NextRequest('http://localhost/api/storage/connections', { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data }), + }), + { params: Promise.resolve({ collection: 'connections' }) } + ); + expect(mockProvider.setCollection).toHaveBeenCalledWith('user@test.com', 'connections', data); + }); + + test('GET uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + await GET(); + 
expect(mockProvider.getAllData).toHaveBeenCalledWith('user@test.com'); + }); + + test('PUT validates all 9 valid collection names', async () => { + const validCollections = [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ]; + + for (const collection of validCollections) { + mockProvider.setCollection.mockClear(); + const res = await PUT( + new NextRequest(`http://localhost/api/storage/${collection}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [] }), + }), + { params: Promise.resolve({ collection }) } + ); + expect(res.status).toBe(200); + } + }); + + test('PUT rejects collection names not in whitelist', async () => { + const invalidNames = ['settings', 'users', 'passwords', '../connections', 'CONNECTIONS']; + for (const name of invalidNames) { + const res = await PUT( + new NextRequest(`http://localhost/api/storage/${name}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ data: [] }), + }), + { params: Promise.resolve({ collection: name }) } + ); + expect(res.status).toBe(400); + } + }); + + test('migrate uses session username for user scoping', async () => { + mockSession = { username: 'user@test.com', role: 'user' }; + await POST( + new NextRequest('http://localhost/api/storage/migrate', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ connections: [] }), + }) + ); + expect(mockProvider.mergeData).toHaveBeenCalledWith('user@test.com', { connections: [] }); + }); +}); diff --git a/tests/components/DataCharts.test.tsx b/tests/components/DataCharts.test.tsx index f50de48..ff25f98 100644 --- a/tests/components/DataCharts.test.tsx +++ b/tests/components/DataCharts.test.tsx @@ -71,6 +71,33 @@ mock.module('@/components/ui/dropdown-menu', () => ({ React.createElement('div', { role: 'menuitem', onClick: 
onClick as (() => void), className }, children as React.ReactNode), })); +const mockGetSavedCharts = mock(() => { + try { + const stored = localStorage.getItem('libredb_saved_charts'); + return stored ? JSON.parse(stored) : []; + } catch { return []; } +}); +const mockSaveChart = mock((chart: Record) => { + const stored = localStorage.getItem('libredb_saved_charts'); + const charts = stored ? JSON.parse(stored) : []; + charts.push(chart); + localStorage.setItem('libredb_saved_charts', JSON.stringify(charts)); +}); +const mockDeleteChart = mock((id: string) => { + const stored = localStorage.getItem('libredb_saved_charts'); + const charts = stored ? JSON.parse(stored) : []; + const filtered = charts.filter((c: Record) => c.id !== id); + localStorage.setItem('libredb_saved_charts', JSON.stringify(filtered)); +}); + +mock.module('@/lib/storage', () => ({ + storage: { + getSavedCharts: mockGetSavedCharts, + saveChart: mockSaveChart, + deleteChart: mockDeleteChart, + }, +})); + mock.module('@/components/ui/select', () => ({ Select: ({ children, value }: Record) => React.createElement('div', { 'data-testid': 'select', 'data-value': value }, children as React.ReactNode), diff --git a/tests/isolated/factory-singleton.test.ts b/tests/isolated/factory-singleton.test.ts new file mode 100644 index 0000000..25e08a3 --- /dev/null +++ b/tests/isolated/factory-singleton.test.ts @@ -0,0 +1,150 @@ +/** + * Factory singleton tests — isolated process required. + * Mocks provider modules to test getStorageProvider() and closeStorageProvider() + * without real database connections. 
+ */ +import { describe, test, expect, beforeEach, mock } from 'bun:test'; + +// ── Mock providers ────────────────────────────────────────────────────────── + +const mockInitialize = mock(async () => {}); +const mockClose = mock(async () => {}); +const mockGetAllData = mock(async () => ({})); + +function makeMockProvider() { + return { + initialize: mockInitialize, + close: mockClose, + getAllData: mockGetAllData, + getCollection: mock(async () => null), + setCollection: mock(async () => {}), + mergeData: mock(async () => {}), + isHealthy: mock(async () => true), + }; +} + +const mockSQLiteInstance = makeMockProvider(); +const mockPostgresInstance = makeMockProvider(); + +mock.module('@/lib/storage/providers/sqlite', () => ({ + SQLiteStorageProvider: mock(() => mockSQLiteInstance), +})); + +mock.module('@/lib/storage/providers/postgres', () => ({ + PostgresStorageProvider: mock(() => mockPostgresInstance), +})); + +// Import factory AFTER mocking providers +import { + getStorageProvider, + closeStorageProvider, + getStorageProviderType, +} from '@/lib/storage/factory'; + +// ── Tests ─────────────────────────────────────────────────────────────────── + +describe('factory: getStorageProvider', () => { + beforeEach(async () => { + // Reset singleton state between tests + await closeStorageProvider(); + mockInitialize.mockClear(); + mockClose.mockClear(); + delete process.env.STORAGE_PROVIDER; + }); + + test('returns null when STORAGE_PROVIDER is local', async () => { + process.env.STORAGE_PROVIDER = 'local'; + const provider = await getStorageProvider(); + expect(provider).toBeNull(); + }); + + test('returns null when STORAGE_PROVIDER is not set', async () => { + const provider = await getStorageProvider(); + expect(provider).toBeNull(); + }); + + test('creates SQLite provider when STORAGE_PROVIDER=sqlite', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + 
expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('creates Postgres provider when STORAGE_PROVIDER=postgres', async () => { + process.env.STORAGE_PROVIDER = 'postgres'; + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('returns same instance on second call (singleton)', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const first = await getStorageProvider(); + const second = await getStorageProvider(); + + expect(first).toBe(second); + // initialize called only once, not twice + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('calls initialize() on first creation', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('propagates error when initialize() throws', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + mockInitialize.mockRejectedValueOnce(new Error('DB init failed')); + + await expect(getStorageProvider()).rejects.toThrow('DB init failed'); + }); +}); + +describe('factory: closeStorageProvider', () => { + beforeEach(async () => { + await closeStorageProvider(); + mockInitialize.mockClear(); + mockClose.mockClear(); + delete process.env.STORAGE_PROVIDER; + }); + + test('closes and resets singleton', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + await closeStorageProvider(); + expect(mockClose).toHaveBeenCalledTimes(1); + }); + + test('creates new instance after close + re-get', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + await closeStorageProvider(); + + mockInitialize.mockClear(); + const provider = await getStorageProvider(); + + expect(provider).not.toBeNull(); + // New initialize call — fresh instance + expect(mockInitialize).toHaveBeenCalledTimes(1); + }); + + test('does not throw when called without active provider', 
async () => { + await expect(closeStorageProvider()).resolves.toBeUndefined(); + expect(mockClose).not.toHaveBeenCalled(); + }); + + test('double close does not throw', async () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + await getStorageProvider(); + + await closeStorageProvider(); + await expect(closeStorageProvider()).resolves.toBeUndefined(); + // close called only once (second call has no provider) + expect(mockClose).toHaveBeenCalledTimes(1); + }); +}); diff --git a/tests/isolated/use-storage-sync.test.ts b/tests/isolated/use-storage-sync.test.ts new file mode 100644 index 0000000..4c9536e --- /dev/null +++ b/tests/isolated/use-storage-sync.test.ts @@ -0,0 +1,366 @@ +import '../setup-dom'; + +import { describe, test, expect, mock, beforeEach, afterEach } from 'bun:test'; +import { renderHook, waitFor, act, cleanup } from '@testing-library/react'; +import { mockGlobalFetch, restoreGlobalFetch } from '../helpers/mock-fetch'; + +// ── Mock storage module ───────────────────────────────────────────────────── + +const mockStorage = { + getConnections: mock(() => [{ id: 'c1' }]), + getHistory: mock(() => []), + getSavedQueries: mock(() => []), + getSchemaSnapshots: mock(() => []), + getSavedCharts: mock(() => []), + getActiveConnectionId: mock(() => null), + getAuditLog: mock(() => []), + getMaskingConfig: mock(() => ({ enabled: true, patterns: [], roleSettings: { admin: { canToggle: true, canReveal: true }, user: { canToggle: false, canReveal: false } } })), + getThresholdConfig: mock(() => []), +}; + +mock.module('@/lib/storage', () => ({ + storage: mockStorage, + STORAGE_COLLECTIONS: [ + 'connections', 'history', 'saved_queries', 'schema_snapshots', + 'saved_charts', 'active_connection_id', 'audit_log', + 'masking_config', 'threshold_config', + ], +})); + +import { useStorageSync } from '@/hooks/use-storage-sync'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function setupLocalMode() { + return mockGlobalFetch({ + 
'/api/storage/config': { ok: true, status: 200, json: { provider: 'local', serverMode: false } }, + }); +} + +function setupServerMode(extraRoutes: Parameters<typeof mockGlobalFetch>[0] = {}) { + return mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: ['connections'] } }, + '/api/storage': { ok: true, status: 200, json: { connections: [{ id: 'server-c1' }] } }, + ...extraRoutes, + }); +} + +// ── Tests ─────────────────────────────────────────────────────────────────── + +describe('useStorageSync', () => { + beforeEach(() => { + localStorage.clear(); + Object.values(mockStorage).forEach((fn) => fn.mockClear()); + }); + + afterEach(() => { + restoreGlobalFetch(); + cleanup(); + }); + + // ── Mode discovery ────────────────────────────────────────────────────── + + describe('mode discovery', () => { + test('starts with isServerMode=false', () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + expect(result.current.isServerMode).toBe(false); + }); + + test('stays in local mode when config returns serverMode=false', async () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + + // Wait for config fetch to resolve + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + }); + + test('switches to server mode when config returns serverMode=true', async () => { + setupServerMode(); + localStorage.setItem('libredb_server_migrated', 'true'); // Skip migration + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + }); + + test('stays in local mode when config fetch fails', async () => { + mockGlobalFetch({ + '/api/storage/config': { ok: false, status: 500, json: { error: 'Server error' } }, + }); + + const { result } = renderHook(() 
=> useStorageSync()); + + // Give it time to settle + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + }); + + test('stays in local mode when config fetch throws network error', async () => { + globalThis.fetch = mock(async () => { + throw new Error('Network error'); + }) as unknown as typeof fetch; + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + expect(result.current.isServerMode).toBe(false); + expect(result.current.syncError).toBeNull(); + }); + }); + + // ── Migration ─────────────────────────────────────────────────────────── + + describe('migration', () => { + test('performs migration on first server-mode visit', async () => { + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Migration flag should be set + expect(localStorage.getItem('libredb_server_migrated')).not.toBeNull(); + + // migrate endpoint was called + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).toContain('/api/storage/migrate'); + }); + + test('skips migration when flag already set', async () => { + localStorage.setItem('libredb_server_migrated', '2026-01-01'); + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // migrate endpoint should NOT be called + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? 
c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).not.toContain('/api/storage/migrate'); + }); + + test('sets migration flag even when no data to migrate', async () => { + // All storage getters return empty + mockStorage.getConnections.mockReturnValue([]); + mockStorage.getActiveConnectionId.mockReturnValue(null); + + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + expect(localStorage.getItem('libredb_server_migrated')).not.toBeNull(); + }); + }); + + // ── Pull from server ────────────────────────────────────────────────── + + describe('pull from server', () => { + test('pulls data from server on mount in server mode', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + const fetchMock = setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // /api/storage was called for pull + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? 
c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).toContain('/api/storage'); + }); + + test('writes server data to localStorage on pull', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.lastSyncedAt).not.toBeNull(); + }); + + // Server returned connections: [{ id: 'server-c1' }] + const stored = localStorage.getItem('libredb_connections'); + expect(stored).not.toBeNull(); + expect(JSON.parse(stored!)).toEqual([{ id: 'server-c1' }]); + }); + + test('does not set syncError on pull failure (graceful degradation)', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage': { ok: false, status: 500, json: { error: 'DB error' } }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Pull failed but no syncError for non-ok response (graceful degradation) + // The hook just returns early without setting error for non-ok + expect(result.current.isSyncing).toBe(false); + }); + }); + + // ── Push to server (debounced) ──────────────────────────────────────── + + describe('push to server', () => { + test('pushes collection to server on storage-change event', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + const fetchMock = mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: [] } }, + '/api/storage': { ok: true, status: 200, json: {} }, + '/api/storage/connections': { ok: true, status: 200, json: { ok: true } }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() 
=> { + expect(result.current.isServerMode).toBe(true); + }); + + // Dispatch storage change event + act(() => { + window.dispatchEvent( + new CustomEvent('libredb-storage-change', { + detail: { collection: 'connections', data: [{ id: 'c1' }] }, + }) + ); + }); + + // Wait for debounce (500ms) + push. NOTE: waitFor only retries when the + // callback THROWS — a falsy return value would pass immediately — so the + // membership check must be an assertion, not a boolean return. + await waitFor(() => { + const calls = (fetchMock.mock.calls as unknown[][]).map((c) => { + const url = typeof c[0] === 'string' ? c[0] : ''; + return new URL(url, 'http://localhost:3000').pathname; + }); + expect(calls).toContain('/api/storage/connections'); + }, { timeout: 2000 }); + }); + + test('sets syncError on push failure', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + + // Use a request handler that returns 500 specifically for PUT /connections + const fetchMock = mockGlobalFetch({ + '/api/storage/config': { ok: true, status: 200, json: { provider: 'postgres', serverMode: true } }, + '/api/storage/migrate': { ok: true, status: 200, json: { ok: true, migrated: [] } }, + '/api/storage/connections': { ok: false, status: 500, json: { error: 'Write failed' } }, + '/api/storage': { ok: true, status: 200, json: {} }, + }); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.isServerMode).toBe(true); + }); + + // Ensure isSyncing is done before triggering push + await waitFor(() => { + expect(result.current.isSyncing).toBe(false); + }); + + act(() => { + window.dispatchEvent( + new CustomEvent('libredb-storage-change', { + detail: { collection: 'connections', data: [{ id: 'c1' }] }, + }) + ); + }); + + // Wait for debounce (500ms) + push to complete and set syncError + await waitFor(() => { + expect(result.current.syncError).not.toBeNull(); + }, { timeout: 3000 }); + }); + }); + + // ── Event listener lifecycle ────────────────────────────────────────── + + describe('event listener lifecycle', () => { + test('does not listen for events in local mode', async () => { + setupLocalMode(); 
+ const spy = mock(() => {}); + const origAdd = window.addEventListener.bind(window); + window.addEventListener = mock((...args: Parameters<typeof window.addEventListener>) => { + if (args[0] === 'libredb-storage-change') spy(); + origAdd(...args); + }) as typeof window.addEventListener; + + renderHook(() => useStorageSync()); + + // Flush pending init promises (config fetch) — an empty waitFor callback + // would resolve immediately and give init no time to complete + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 0)); + }); + + // Event listener for storage change should not be added in local mode + expect(spy).not.toHaveBeenCalled(); + + window.addEventListener = origAdd; + }); + }); + + // ── Initial state ───────────────────────────────────────────────────── + + describe('initial state', () => { + test('returns correct initial state shape', () => { + setupLocalMode(); + const { result } = renderHook(() => useStorageSync()); + + expect(result.current).toEqual({ + isServerMode: false, + isSyncing: false, + lastSyncedAt: null, + syncError: null, + }); + }); + + test('updates lastSyncedAt after successful pull', async () => { + localStorage.setItem('libredb_server_migrated', 'true'); + setupServerMode(); + + const { result } = renderHook(() => useStorageSync()); + + await waitFor(() => { + expect(result.current.lastSyncedAt).not.toBeNull(); + }); + + expect(result.current.lastSyncedAt).toBeInstanceOf(Date); + }); + }); +}); diff --git a/tests/run-components.sh b/tests/run-components.sh index 4214351..a70b6d1 100755 --- a/tests/run-components.sh +++ b/tests/run-components.sh @@ -24,7 +24,7 @@ set -e PASS=0 FAIL=0 -TOTAL_GROUPS=15 +TOTAL_GROUPS=18 EXTRA_BUN_ARGS=("$@") GROUP_INDEX=0 COVERAGE_MODE=0 @@ -66,6 +66,14 @@ run_group() { fi } +# Group 0a: useStorageSync hook (isolated — mocks @/lib/storage which contaminates other hook tests) +run_group "Group 0a: useStorageSync hook" \ + tests/isolated/use-storage-sync.test.ts + +# Group 0b: Factory singleton (isolated — mocks provider modules which contaminates provider unit tests) +run_group "Group 0b: Factory singleton" \ + tests/isolated/factory-singleton.test.ts + # Group 1: 
Studio (isolated — mocks almost every child component) run_group "Group 1/6: Studio" \ tests/components/Studio.test.tsx @@ -140,13 +148,16 @@ run_group "Group 12/13: MaskingSettings" \ run_group "Group 13/14: SchemaDiff" \ tests/components/SchemaDiff.test.tsx -# Group 15: ConnectionModal Mobile Drawer (isolated - useIsMobile returns true) -run_group "Group 15/15: ConnectionModal Mobile" \ +# Group 16: ConnectionModal Mobile Drawer (isolated - useIsMobile returns true) +run_group "Group 16/16: ConnectionModal Mobile" \ tests/components/ConnectionModal.mobile.test.tsx -# Group 14: All remaining files (safe together) -run_group "Group 14/15: Remaining components" \ - tests/components/DataCharts.test.tsx \ +# Group 14: DataCharts (isolated — mocks @/lib/storage with chart methods) +run_group "Group 14/16: DataCharts" \ + tests/components/DataCharts.test.tsx + +# Group 15: All remaining files (safe together) +run_group "Group 15/16: Remaining components" \ tests/components/QueryEditor.test.tsx \ tests/components/QuerySafetyDialog.test.tsx \ tests/components/QueryHistory.test.tsx \ diff --git a/tests/unit/lib/storage/factory.test.ts b/tests/unit/lib/storage/factory.test.ts new file mode 100644 index 0000000..6d79875 --- /dev/null +++ b/tests/unit/lib/storage/factory.test.ts @@ -0,0 +1,71 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; +import { + getStorageProviderType, + isServerStorageEnabled, + getStorageConfig, +} from '@/lib/storage/factory'; + +// Clean env before every test to prevent leakage +beforeEach(() => { + delete process.env.STORAGE_PROVIDER; +}); + +describe('storage factory: getStorageProviderType', () => { + test('returns "local" when STORAGE_PROVIDER not set', () => { + expect(getStorageProviderType()).toBe('local'); + }); + + test('returns "local" for empty string', () => { + process.env.STORAGE_PROVIDER = ''; + expect(getStorageProviderType()).toBe('local'); + }); + + test('returns "sqlite" when STORAGE_PROVIDER=sqlite', () => { + 
process.env.STORAGE_PROVIDER = 'sqlite'; + expect(getStorageProviderType()).toBe('sqlite'); + }); + + test('returns "postgres" when STORAGE_PROVIDER=postgres', () => { + process.env.STORAGE_PROVIDER = 'postgres'; + expect(getStorageProviderType()).toBe('postgres'); + }); + + test('returns "local" for unknown values', () => { + process.env.STORAGE_PROVIDER = 'redis'; + expect(getStorageProviderType()).toBe('local'); + }); + + test('is case-insensitive', () => { + process.env.STORAGE_PROVIDER = 'SQLite'; + expect(getStorageProviderType()).toBe('sqlite'); + }); +}); + +describe('storage factory: isServerStorageEnabled', () => { + test('returns false when local', () => { + expect(isServerStorageEnabled()).toBe(false); + }); + + test('returns true for sqlite', () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + expect(isServerStorageEnabled()).toBe(true); + }); + + test('returns true for postgres', () => { + process.env.STORAGE_PROVIDER = 'postgres'; + expect(isServerStorageEnabled()).toBe(true); + }); +}); + +describe('storage factory: getStorageConfig', () => { + test('returns correct shape for local', () => { + const config = getStorageConfig(); + expect(config).toEqual({ provider: 'local', serverMode: false }); + }); + + test('returns correct shape for sqlite', () => { + process.env.STORAGE_PROVIDER = 'sqlite'; + const config = getStorageConfig(); + expect(config).toEqual({ provider: 'sqlite', serverMode: true }); + }); +}); diff --git a/tests/unit/lib/storage/local-storage.test.ts b/tests/unit/lib/storage/local-storage.test.ts new file mode 100644 index 0000000..c908375 --- /dev/null +++ b/tests/unit/lib/storage/local-storage.test.ts @@ -0,0 +1,70 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; + +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { readJSON, writeJSON, readString, writeString, remove, getKey } from '@/lib/storage/local-storage'; + 
+describe('local-storage: getKey', () => { + test('maps known collection names to libredb_ prefix keys', () => { + expect(getKey('connections')).toBe('libredb_connections'); + expect(getKey('history')).toBe('libredb_history'); + expect(getKey('saved_queries')).toBe('libredb_saved_queries'); + expect(getKey('audit_log')).toBe('libredb_audit_log'); + expect(getKey('masking_config')).toBe('libredb_masking_config'); + expect(getKey('threshold_config')).toBe('libredb_threshold_config'); + }); + + test('falls back to libredb_ prefix for unknown collections', () => { + expect(getKey('unknown')).toBe('libredb_unknown'); + }); +}); + +describe('local-storage: readJSON / writeJSON', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('writeJSON / readJSON round-trip', () => { + writeJSON('connections', [{ id: 1 }]); + expect(readJSON<{ id: number }[]>('connections')).toEqual([{ id: 1 }]); + }); + + test('readJSON returns null for non-existent key', () => { + expect(readJSON('nonexistent')).toBeNull(); + }); + + test('readJSON returns null for invalid JSON', () => { + localStorage.setItem('libredb_connections', 'not-json{{{'); + expect(readJSON('connections')).toBeNull(); + }); +}); + +describe('local-storage: readString / writeString', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('writeString / readString round-trip', () => { + writeString('active_connection_id', 'conn-42'); + expect(readString('active_connection_id')).toBe('conn-42'); + }); + + test('readString returns null for non-existent key', () => { + expect(readString('active_connection_id')).toBeNull(); + }); +}); + +describe('local-storage: remove', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('remove deletes the key', () => { + writeString('active_connection_id', 'conn-42'); + remove('active_connection_id'); + expect(readString('active_connection_id')).toBeNull(); + }); +}); diff --git a/tests/unit/lib/storage/providers/postgres.test.ts 
b/tests/unit/lib/storage/providers/postgres.test.ts new file mode 100644 index 0000000..fab5c62 --- /dev/null +++ b/tests/unit/lib/storage/providers/postgres.test.ts @@ -0,0 +1,231 @@ +import { describe, test, expect, beforeEach, afterEach, mock } from 'bun:test'; +import type { ServerStorageProvider } from '@/lib/storage/types'; + +// ── Mock pg ────────────────────────────────────────────────────────────────── + +/* eslint-disable @typescript-eslint/no-explicit-any */ +const mockQuery = mock(async (..._args: any[]): Promise<any> => ({ rows: [] })); +const mockRelease = mock(() => {}); +const mockEnd = mock(async () => {}); + +const mockClient = { + query: mockQuery, + release: mockRelease, +}; + +const mockPool: Record<string, any> = { + query: mockQuery, + connect: mock(async () => mockClient), + end: mockEnd, +}; + +mock.module('pg', () => ({ + Pool: mock(() => mockPool), +})); +/* eslint-enable @typescript-eslint/no-explicit-any */ + +import { PostgresStorageProvider } from '@/lib/storage/providers/postgres'; + +describe('PostgresStorageProvider', () => { + let provider: ServerStorageProvider; + + beforeEach(() => { + mockQuery.mockClear(); + mockEnd.mockClear(); + mockRelease.mockClear(); + provider = new PostgresStorageProvider('postgresql://localhost:5432/test'); + }); + + afterEach(async () => { + await provider.close(); + }); + + test('initialize creates table', async () => { + await provider.initialize(); + expect(mockQuery).toHaveBeenCalledTimes(1); + const sql = (mockQuery.mock.calls as unknown[][])[0][0] as string; + expect(sql).toContain('CREATE TABLE IF NOT EXISTS user_storage'); + }); + + test('getAllData returns parsed collections', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: JSON.stringify([{ id: 'h1' }]) }, + ], + }); + + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as 
unknown).toEqual([{ id: 'c1' }]); + expect(result.history as unknown).toEqual([{ id: 'h1' }]); + }); + + test('getCollection returns null when not found', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [] }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getCollection returns parsed data', async () => { + const data = [{ id: 'c1', name: 'Test' }]; + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [{ data: JSON.stringify(data) }], + }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result as unknown).toEqual(data); + }); + + test('setCollection calls INSERT with ON CONFLICT', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [] }); + + await provider.setCollection('admin@test.com', 'connections', []); + + const calls = mockQuery.mock.calls as unknown[][]; + const lastCall = calls[calls.length - 1]; + const sql = lastCall[0] as string; + expect(sql).toContain('INSERT INTO user_storage'); + expect(sql).toContain('ON CONFLICT'); + }); + + test('isHealthy returns true on success', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ rows: [{ ok: 1 }] }); + + expect(await provider.isHealthy()).toBe(true); + }); + + test('isHealthy returns false on error', async () => { + await provider.initialize(); + mockQuery.mockRejectedValueOnce(new Error('Connection lost')); + + expect(await provider.isHealthy()).toBe(false); + }); + + test('close calls pool.end()', async () => { + await provider.initialize(); + await provider.close(); + expect(mockEnd).toHaveBeenCalledTimes(1); + }); + + test('mergeData uses transaction', async () => { + await provider.initialize(); + + const mockClientQuery = mock(async (): Promise<{ rows: unknown[] }> => ({ rows: [] })); + const client = { + query: mockClientQuery, + release: mock(() => {}), + 
}; + mockPool.connect = mock(async () => client); + + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }); + + const queries = (mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + expect(queries[0]).toBe('BEGIN'); + expect(queries[queries.length - 1]).toBe('COMMIT'); + }); + + test('mergeData rolls back on error and releases client', async () => { + await provider.initialize(); + + let callCount = 0; + const mockClientQuery = mock(async (_sql: string): Promise<{ rows: unknown[] }> => { + callCount++; + // Fail on the INSERT (2nd query: call 1 is BEGIN, call 2 is the INSERT) + if (callCount === 2) throw new Error('Insert failed'); + return { rows: [] }; + }); + const mockClientRelease = mock(() => {}); + const client = { + query: mockClientQuery, + release: mockClientRelease, + }; + mockPool.connect = mock(async () => client); + + await expect( + provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }) + ).rejects.toThrow('Insert failed'); + + // ROLLBACK should have been called + const queries = (mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + expect(queries).toContain('ROLLBACK'); + // Client always released (finally block) + expect(mockClientRelease).toHaveBeenCalledTimes(1); + }); + + test('mergeData only writes provided collections', async () => { + await provider.initialize(); + + const mockClientQuery = mock(async (): Promise<{ rows: unknown[] }> => ({ rows: [] })); + const client = { + query: mockClientQuery, + release: mock(() => {}), + }; + mockPool.connect = mock(async () => client); + + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'Test', type: 'postgres', createdAt: new Date() } as import('@/lib/types').DatabaseConnection], + }); + + const queries = 
(mockClientQuery.mock.calls as unknown[][]).map((c) => c[0] as string); + // BEGIN + 1 INSERT + COMMIT = 3 queries + expect(queries.length).toBe(3); + expect(queries[0]).toBe('BEGIN'); + expect(queries[1]).toContain('INSERT INTO user_storage'); + expect(queries[2]).toBe('COMMIT'); + }); + + test('getCollection returns null for corrupted JSON', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [{ data: 'invalid-json{{{' }], + }); + + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getAllData skips corrupted JSON rows', async () => { + await provider.initialize(); + mockQuery.mockResolvedValueOnce({ + rows: [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: 'corrupted{{{' }, + ], + }); + + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history).toBeUndefined(); + }); + + test('initialize throws when no connection string', async () => { + const origEnv = process.env.STORAGE_POSTGRES_URL; + delete process.env.STORAGE_POSTGRES_URL; + try { + const noUrlProvider = new PostgresStorageProvider(''); + await expect(noUrlProvider.initialize()).rejects.toThrow('STORAGE_POSTGRES_URL is required'); + } finally { + if (origEnv !== undefined) process.env.STORAGE_POSTGRES_URL = origEnv; + } + }); + + test('close on uninitialized provider does not throw', async () => { + const freshProvider = new PostgresStorageProvider('postgresql://localhost/test'); + await expect(freshProvider.close()).resolves.toBeUndefined(); + }); + + test('ensurePool throws when not initialized', async () => { + const freshProvider = new PostgresStorageProvider('postgresql://localhost/test'); + await expect(freshProvider.getAllData('test@test.com')).rejects.toThrow('not initialized'); + }); +}); diff --git 
a/tests/unit/lib/storage/providers/sqlite.test.ts b/tests/unit/lib/storage/providers/sqlite.test.ts new file mode 100644 index 0000000..223f034 --- /dev/null +++ b/tests/unit/lib/storage/providers/sqlite.test.ts @@ -0,0 +1,232 @@ +import { describe, test, expect, beforeEach, afterEach, mock } from 'bun:test'; +import type { ServerStorageProvider } from '@/lib/storage/types'; + +// ── Mock better-sqlite3 ───────────────────────────────────────────────────── + +/* eslint-disable @typescript-eslint/no-explicit-any */ +const mockPrepare = mock((): any => ({ + all: mock((): any[] => []), + get: mock((): any => undefined), + run: mock((..._args: any[]) => {}), +})); +const mockExec = mock((..._args: any[]) => {}); +const mockPragma = mock((..._args: any[]) => {}); +const mockClose = mock(() => {}); + +const mockDbInstance = { + prepare: mockPrepare, + exec: mockExec, + pragma: mockPragma, + close: mockClose, + transaction: mock((fn: () => void) => fn), +}; + +mock.module('better-sqlite3', () => ({ + default: mock(() => mockDbInstance), +})); + +// Mock fs and path for directory creation +mock.module('fs', () => ({ + existsSync: mock(() => true), + mkdirSync: mock(() => {}), +})); + +mock.module('path', () => ({ + dirname: mock((p: string) => p.replace(/\/[^/]*$/, '')), +})); +/* eslint-enable @typescript-eslint/no-explicit-any */ + +import { SQLiteStorageProvider } from '@/lib/storage/providers/sqlite'; + +describe('SQLiteStorageProvider', () => { + let provider: ServerStorageProvider; + + beforeEach(() => { + mockPrepare.mockClear(); + mockExec.mockClear(); + mockPragma.mockClear(); + mockClose.mockClear(); + provider = new SQLiteStorageProvider(':memory:'); + }); + + afterEach(async () => { + await provider.close(); + }); + + test('initialize creates table and enables WAL', async () => { + await provider.initialize(); + expect(mockPragma).toHaveBeenCalledWith('journal_mode = WAL'); + expect(mockExec).toHaveBeenCalledTimes(1); + const sql = (mockExec.mock.calls as 
unknown[][])[0][0] as string; + expect(sql).toContain('CREATE TABLE IF NOT EXISTS user_storage'); + }); + + test('getAllData returns parsed collections', async () => { + const mockRows = [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: JSON.stringify([{ id: 'h1' }]) }, + ]; + mockPrepare.mockReturnValue({ + all: mock(() => mockRows), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getAllData('admin@test.com'); + + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history as unknown).toEqual([{ id: 'h1' }]); + }); + + test('getCollection returns null when not found', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getCollection returns parsed data', async () => { + const data = [{ id: 'c1', name: 'Test' }]; + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ data: JSON.stringify(data) })), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result as unknown).toEqual(data); + }); + + test('setCollection calls INSERT OR REPLACE', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + + await provider.initialize(); + await provider.setCollection('admin@test.com', 'connections', []); + + expect(mockRun).toHaveBeenCalled(); + const args = (mockRun.mock.calls as unknown[][])[0]; + expect(args[0]).toBe('admin@test.com'); + expect(args[1]).toBe('connections'); + }); + + test('isHealthy returns true when db works', async () => { + 
mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ ok: 1 })), + run: mock(() => {}), + }); + + await provider.initialize(); + expect(await provider.isHealthy()).toBe(true); + }); + + test('close calls db.close()', async () => { + await provider.initialize(); + await provider.close(); + expect(mockClose).toHaveBeenCalledTimes(1); + }); + + test('mergeData uses transaction', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + + const txFn = mock((fn: () => void) => fn); + mockDbInstance.transaction = txFn; + + await provider.initialize(); + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'DB', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date() }] as import('@/lib/types').DatabaseConnection[], + history: [{ id: 'h1', connectionId: 'c1', query: 'SELECT 1', executionTime: 10, status: 'success', executedAt: new Date() }] as import('@/lib/types').QueryHistoryItem[], + }); + + // Transaction wrapper was called + expect(txFn).toHaveBeenCalledTimes(1); + // run was called for each provided collection + expect(mockRun.mock.calls.length).toBeGreaterThanOrEqual(2); + }); + + test('mergeData only writes provided collections', async () => { + const mockRun = mock((..._args: unknown[]) => {}); + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => undefined), + run: mockRun, + }); + mockDbInstance.transaction = mock((fn: () => void) => fn); + + await provider.initialize(); + await provider.mergeData('admin@test.com', { + connections: [{ id: 'c1', name: 'DB', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date() }] as import('@/lib/types').DatabaseConnection[], + }); + + // Only connections was provided, so only 1 run call for data + expect(mockRun).toHaveBeenCalledTimes(1); + const args = (mockRun.mock.calls as unknown[][])[0]; + 
expect(args[1]).toBe('connections'); + }); + + test('isHealthy returns false on error', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => { throw new Error('DB crashed'); }), + run: mock(() => {}), + }); + + await provider.initialize(); + expect(await provider.isHealthy()).toBe(false); + }); + + test('getCollection returns null for corrupted JSON', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => []), + get: mock(() => ({ data: 'not-valid-json{{{' })), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getCollection('admin@test.com', 'connections'); + expect(result).toBeNull(); + }); + + test('getAllData skips corrupted JSON rows', async () => { + mockPrepare.mockReturnValue({ + all: mock(() => [ + { collection: 'connections', data: JSON.stringify([{ id: 'c1' }]) }, + { collection: 'history', data: 'corrupted{{{' }, + ]), + get: mock(() => undefined), + run: mock(() => {}), + }); + + await provider.initialize(); + const result = await provider.getAllData('admin@test.com'); + expect(result.connections as unknown).toEqual([{ id: 'c1' }]); + expect(result.history).toBeUndefined(); + }); + + test('close on uninitialized provider does not throw', async () => { + const freshProvider = new SQLiteStorageProvider(':memory:'); + await expect(freshProvider.close()).resolves.toBeUndefined(); + }); + + test('ensureDb throws when not initialized', async () => { + const freshProvider = new SQLiteStorageProvider(':memory:'); + await expect(freshProvider.getAllData('test@test.com')).rejects.toThrow('not initialized'); + }); +}); diff --git a/tests/unit/lib/storage/storage-facade-extended.test.ts b/tests/unit/lib/storage/storage-facade-extended.test.ts new file mode 100644 index 0000000..8904a39 --- /dev/null +++ b/tests/unit/lib/storage/storage-facade-extended.test.ts @@ -0,0 +1,331 @@ +import { describe, test, expect, beforeEach } from 'bun:test'; + +// Ensure `typeof window !== 
'undefined'` passes +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { storage } from '@/lib/storage'; +import type { QueryHistoryItem, SchemaSnapshot } from '@/lib/types'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function makeHistoryItem(overrides: Partial<QueryHistoryItem> = {}): QueryHistoryItem { + return { + id: `h-${Math.random().toString(36).slice(2, 8)}`, + connectionId: 'conn-1', + query: 'SELECT 1', + executionTime: 42, + status: 'success', + executedAt: new Date(), + ...overrides, + }; +} + +function makeSnapshot(overrides: Partial<SchemaSnapshot> = {}): SchemaSnapshot { + return { + id: `snap-${Math.random().toString(36).slice(2, 8)}`, + connectionId: 'conn-1', + connectionName: 'Test DB', + databaseType: 'postgres', + schema: [], + createdAt: new Date(), + ...overrides, + }; +} + +// ── MongoDB JSON round-trip ───────────────────────────────────────────────── + +describe('storage facade: MongoDB JSON round-trip', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('MongoDB JSON query survives addToHistory round-trip', () => { + const mongoQuery = JSON.stringify({ + collection: 'users', + operation: 'find', + filter: { status: 'active', age: { $gt: 18 } }, + options: { limit: 50, sort: { name: 1 } }, + }); + + storage.addToHistory(makeHistoryItem({ id: 'mongo-1', query: mongoQuery })); + const history = storage.getHistory(); + + expect(history.length).toBe(1); + expect(history[0].query).toBe(mongoQuery); + + // Verify the inner JSON is valid and parseable + const parsed = JSON.parse(history[0].query); + expect(parsed.collection).toBe('users'); + expect(parsed.filter.age.$gt).toBe(18); + }); + + test('nested JSON with special characters round-trips correctly', () => { + const complexQuery = JSON.stringify({ + collection: 'logs', + operation: 'aggregate', + pipeline: [ + { $match: { message: { $regex: 'error.*"fatal"' } } }, + { $group: { 
_id: '$level', count: { $sum: 1 } } }, + { $sort: { count: -1 } }, + ], + }); + + storage.addToHistory(makeHistoryItem({ id: 'mongo-2', query: complexQuery })); + const result = storage.getHistory(); + + expect(result[0].query).toBe(complexQuery); + const parsed = JSON.parse(result[0].query); + expect(parsed.pipeline.length).toBe(3); + }); + + test('multiple MongoDB queries in history maintain separate JSON integrity', () => { + const queries = [ + JSON.stringify({ collection: 'users', operation: 'find', filter: {} }), + JSON.stringify({ collection: 'orders', operation: 'insertOne', document: { item: 'laptop', price: 999.99 } }), + JSON.stringify({ collection: 'products', operation: 'updateMany', filter: { stock: 0 }, update: { $set: { available: false } } }), + ]; + + queries.forEach((q, i) => { + storage.addToHistory(makeHistoryItem({ id: `m-${i}`, query: q })); + }); + + const history = storage.getHistory(); + expect(history.length).toBe(3); + + // History is prepended, so reverse order + for (let i = 0; i < 3; i++) { + const parsed = JSON.parse(history[i].query); + expect(parsed.collection).toBeDefined(); + expect(parsed.operation).toBeDefined(); + } + }); + + test('mixed SQL and MongoDB queries in same history', () => { + storage.addToHistory(makeHistoryItem({ id: 'sql-1', query: 'SELECT * FROM users WHERE name = \'O\'\'Brien\'' })); + storage.addToHistory(makeHistoryItem({ + id: 'mongo-1', + query: JSON.stringify({ collection: 'users', operation: 'find', filter: { name: "O'Brien" } }), + })); + storage.addToHistory(makeHistoryItem({ id: 'sql-2', query: 'INSERT INTO logs (msg) VALUES (\'{"key": "value"}\')' })); + + const history = storage.getHistory(); + expect(history.length).toBe(3); + + // SQL with embedded JSON string + expect(history[0].query).toContain('{"key": "value"}'); + // MongoDB JSON + const mongoParsed = JSON.parse(history[1].query); + expect(mongoParsed.filter.name).toBe("O'Brien"); + // SQL with quotes + 
expect(history[2].query).toContain("O''Brien"); + }); + + test('Redis JSON command round-trips correctly', () => { + const redisQuery = JSON.stringify({ + command: 'SET', + args: ['mykey', '{"nested": "json", "arr": [1,2,3]}'], + }); + + storage.addToHistory(makeHistoryItem({ id: 'redis-1', query: redisQuery })); + const result = storage.getHistory(); + + expect(result[0].query).toBe(redisQuery); + const parsed = JSON.parse(result[0].query); + expect(parsed.command).toBe('SET'); + // The nested JSON in args should also survive + const nestedJson = JSON.parse(parsed.args[1]); + expect(nestedJson.nested).toBe('json'); + expect(nestedJson.arr).toEqual([1, 2, 3]); + }); +}); + +// ── Buffer boundary tests ─────────────────────────────────────────────────── + +describe('storage facade: history buffer boundary (500)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims history to 500 when adding item over limit', () => { + // Fill to exactly 500 + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `h-${i}` })); + } + expect(storage.getHistory().length).toBe(500); + + // Add one more — should trim oldest + storage.addToHistory(makeHistoryItem({ id: 'h-new' })); + const history = storage.getHistory(); + expect(history.length).toBe(500); + expect(history[0].id).toBe('h-new'); // newest first + }); + + test('exactly 500 items are kept without trimming', () => { + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `h-${i}` })); + } + expect(storage.getHistory().length).toBe(500); + }); + + test('oldest items are dropped when buffer overflows', () => { + // Fill with 500 items + for (let i = 0; i < 500; i++) { + storage.addToHistory(makeHistoryItem({ id: `old-${i}` })); + } + + // Add 3 new items + for (let i = 0; i < 3; i++) { + storage.addToHistory(makeHistoryItem({ id: `new-${i}` })); + } + + const history = storage.getHistory(); + expect(history.length).toBe(500); + // Newest 3 should be at the top 
+ expect(history[0].id).toBe('new-2'); + expect(history[1].id).toBe('new-1'); + expect(history[2].id).toBe('new-0'); + // Oldest should be dropped + expect(history.find((h) => h.id === 'old-0')).toBeUndefined(); + expect(history.find((h) => h.id === 'old-1')).toBeUndefined(); + expect(history.find((h) => h.id === 'old-2')).toBeUndefined(); + }); +}); + +describe('storage facade: schema snapshot buffer boundary (50)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims snapshots to 50 when over limit', () => { + for (let i = 0; i < 50; i++) { + storage.saveSchemaSnapshot(makeSnapshot({ id: `snap-${i}` })); + } + expect(storage.getSchemaSnapshots().length).toBe(50); + + // Add one more + storage.saveSchemaSnapshot(makeSnapshot({ id: 'snap-new' })); + const snapshots = storage.getSchemaSnapshots(); + expect(snapshots.length).toBe(50); + // Oldest should be dropped (sliced from end, keeps last 50) + expect(snapshots.find((s) => s.id === 'snap-0')).toBeUndefined(); + }); + + test('exactly 50 snapshots kept without trimming', () => { + for (let i = 0; i < 50; i++) { + storage.saveSchemaSnapshot(makeSnapshot({ id: `snap-${i}` })); + } + expect(storage.getSchemaSnapshots().length).toBe(50); + }); +}); + +describe('storage facade: audit log buffer boundary (1000)', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('trims audit log to 1000 events', () => { + const events = Array.from({ length: 1050 }, (_, i) => ({ + id: `evt-${i}`, + timestamp: new Date().toISOString(), + type: 'query_execution' as const, + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success' as const, + })); + + storage.saveAuditLog(events); + const result = storage.getAuditLog(); + expect(result.length).toBe(1000); + // Keeps the last 1000 (newest) + expect(result[0].id).toBe('evt-50'); + expect(result[999].id).toBe('evt-1049'); + }); + + test('exactly 1000 events kept without trimming', () => { + const events = Array.from({ length: 1000 }, (_, 
i) => ({ + id: `evt-${i}`, + timestamp: new Date().toISOString(), + type: 'query_execution' as const, + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success' as const, + })); + + storage.saveAuditLog(events); + expect(storage.getAuditLog().length).toBe(1000); + }); +}); + +// ── Delete non-existent ID ────────────────────────────────────────────────── + +describe('storage facade: delete non-existent items', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('deleteConnection with non-existent id does not throw', () => { + expect(() => storage.deleteConnection('non-existent')).not.toThrow(); + }); + + test('deleteSavedQuery with non-existent id does not throw', () => { + expect(() => storage.deleteSavedQuery('non-existent')).not.toThrow(); + }); + + test('deleteSchemaSnapshot with non-existent id does not throw', () => { + expect(() => storage.deleteSchemaSnapshot('non-existent')).not.toThrow(); + }); + + test('deleteChart with non-existent id does not throw', () => { + expect(() => storage.deleteChart('non-existent')).not.toThrow(); + }); + + test('deleteConnection does not affect existing items', () => { + storage.saveConnection({ + id: 'c1', name: 'DB1', type: 'postgres', host: 'localhost', port: 5432, createdAt: new Date(), + }); + storage.deleteConnection('non-existent'); + expect(storage.getConnections().length).toBe(1); + expect(storage.getConnections()[0].id).toBe('c1'); + }); +}); + +// ── Event dispatch for all mutation methods ───────────────────────────────── + +describe('storage facade: event dispatch completeness', () => { + beforeEach(() => { + localStorage.clear(); + }); + + const mutations: Array<{ name: string; fn: () => void; expectedCollection: string }> = [ + { name: 'clearHistory', fn: () => storage.clearHistory(), expectedCollection: 'history' }, + { name: 'saveQuery', fn: () => storage.saveQuery({ id: 'q1', name: 'Test', query: 'SELECT 1', connectionType: 'postgres', createdAt: new Date(), updatedAt: new 
Date() }), expectedCollection: 'saved_queries' }, + { name: 'deleteSavedQuery', fn: () => storage.deleteSavedQuery('q1'), expectedCollection: 'saved_queries' }, + { name: 'saveSchemaSnapshot', fn: () => storage.saveSchemaSnapshot(makeSnapshot()), expectedCollection: 'schema_snapshots' }, + { name: 'deleteSchemaSnapshot', fn: () => storage.deleteSchemaSnapshot('snap-1'), expectedCollection: 'schema_snapshots' }, + { name: 'saveChart', fn: () => storage.saveChart({ id: 'ch1', name: 'Chart', chartType: 'bar', xAxis: 'x', yAxis: ['y'], createdAt: new Date() }), expectedCollection: 'saved_charts' }, + { name: 'deleteChart', fn: () => storage.deleteChart('ch1'), expectedCollection: 'saved_charts' }, + { name: 'saveAuditLog', fn: () => storage.saveAuditLog([]), expectedCollection: 'audit_log' }, + { name: 'saveMaskingConfig', fn: () => storage.saveMaskingConfig({ enabled: true, patterns: [], roleSettings: { admin: { canToggle: true, canReveal: true }, user: { canToggle: false, canReveal: false } } }), expectedCollection: 'masking_config' }, + { name: 'saveThresholdConfig', fn: () => storage.saveThresholdConfig([]), expectedCollection: 'threshold_config' }, + ]; + + for (const { name, fn, expectedCollection } of mutations) { + test(`${name} dispatches event for '${expectedCollection}'`, () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + fn(); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe(expectedCollection); + + window.removeEventListener('libredb-storage-change', handler); + }); + } +}); diff --git a/tests/unit/lib/storage/storage-facade.test.ts b/tests/unit/lib/storage/storage-facade.test.ts new file mode 100644 index 0000000..0ee240d --- /dev/null +++ b/tests/unit/lib/storage/storage-facade.test.ts @@ -0,0 +1,184 @@ +import { describe, test, expect, beforeEach, mock } from 'bun:test'; + +// Ensure `typeof 
window !== 'undefined'` passes +if (typeof globalThis.window === 'undefined') { + // @ts-expect-error — minimal window stub + globalThis.window = globalThis; +} + +import { storage } from '@/lib/storage'; +import type { DatabaseConnection } from '@/lib/types'; +import type { AuditEvent } from '@/lib/audit'; +import type { MaskingConfig } from '@/lib/data-masking'; +import type { ThresholdConfig } from '@/lib/monitoring-thresholds'; + +// ── Helpers ───────────────────────────────────────────────────────────────── + +function makeConnection(overrides: Partial<DatabaseConnection> = {}): DatabaseConnection { + return { + id: 'conn-1', + name: 'Test DB', + type: 'postgres', + host: 'localhost', + port: 5432, + createdAt: new Date('2025-01-01'), + ...overrides, + }; +} + +function makeAuditEvent(overrides: Partial<AuditEvent> = {}): AuditEvent { + return { + id: 'evt-1', + timestamp: '2025-01-01T00:00:00Z', + type: 'query_execution', + action: 'SELECT', + target: 'users', + user: 'admin', + result: 'success', + ...overrides, + }; +} + +// ── CustomEvent dispatch ───────────────────────────────────────────────────── + +describe('storage facade: CustomEvent dispatch', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('saveConnection dispatches libredb-storage-change event', () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + storage.saveConnection(makeConnection()); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe('connections'); + + window.removeEventListener('libredb-storage-change', handler); + }); + + test('deleteConnection dispatches event', () => { + storage.saveConnection(makeConnection()); + const handler = mock(() => {}); + window.addEventListener('libredb-storage-change', handler); + + storage.deleteConnection('conn-1'); + + expect(handler).toHaveBeenCalledTimes(1); + 
window.removeEventListener('libredb-storage-change', handler); + }); + + test('addToHistory dispatches event', () => { + const handler = mock(() => {}); + window.addEventListener('libredb-storage-change', handler); + + storage.addToHistory({ + id: 'h-1', + connectionId: 'c-1', + query: 'SELECT 1', + executionTime: 42, + status: 'success', + executedAt: new Date(), + }); + + expect(handler).toHaveBeenCalledTimes(1); + window.removeEventListener('libredb-storage-change', handler); + }); + + test('setActiveConnectionId dispatches event', () => { + let captured: CustomEvent | null = null; + const handler = (e: Event) => { captured = e as CustomEvent; }; + window.addEventListener('libredb-storage-change', handler); + + storage.setActiveConnectionId('conn-42'); + + expect(captured).not.toBeNull(); + expect(captured!.detail.collection).toBe('active_connection_id'); + expect(captured!.detail.data).toBe('conn-42'); + + window.removeEventListener('libredb-storage-change', handler); + }); +}); + +// ── Audit log ──────────────────────────────────────────────────────────────── + +describe('storage facade: audit log', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('getAuditLog returns empty array when nothing stored', () => { + expect(storage.getAuditLog()).toEqual([]); + }); + + test('saveAuditLog / getAuditLog round-trip', () => { + const events = [makeAuditEvent({ id: 'e1' }), makeAuditEvent({ id: 'e2' })]; + storage.saveAuditLog(events); + const result = storage.getAuditLog(); + expect(result.length).toBe(2); + expect(result[0].id).toBe('e1'); + }); + + test('saveAuditLog trims to 1000 events', () => { + const events: AuditEvent[] = []; + for (let i = 0; i < 1050; i++) { + events.push(makeAuditEvent({ id: `e-${i}` })); + } + storage.saveAuditLog(events); + expect(storage.getAuditLog().length).toBe(1000); + }); +}); + +// ── Masking config ─────────────────────────────────────────────────────────── + +describe('storage facade: masking config', () => { + 
beforeEach(() => { + localStorage.clear(); + }); + + test('getMaskingConfig returns defaults when nothing stored', () => { + const config = storage.getMaskingConfig(); + expect(config.enabled).toBe(true); + expect(config.patterns.length).toBeGreaterThan(0); + }); + + test('saveMaskingConfig / getMaskingConfig round-trip', () => { + const config: MaskingConfig = { + enabled: false, + patterns: [], + roleSettings: { + admin: { canToggle: true, canReveal: true }, + user: { canToggle: false, canReveal: false }, + }, + }; + storage.saveMaskingConfig(config); + const result = storage.getMaskingConfig(); + expect(result.enabled).toBe(false); + }); +}); + +// ── Threshold config ───────────────────────────────────────────────────────── + +describe('storage facade: threshold config', () => { + beforeEach(() => { + localStorage.clear(); + }); + + test('getThresholdConfig returns defaults when nothing stored', () => { + const config = storage.getThresholdConfig(); + expect(config.length).toBeGreaterThan(0); + expect(config[0].metric).toBe('cacheHitRatio'); + }); + + test('saveThresholdConfig / getThresholdConfig round-trip', () => { + const config: ThresholdConfig[] = [ + { metric: 'custom', warning: 50, critical: 80, direction: 'above', label: 'Custom' }, + ]; + storage.saveThresholdConfig(config); + const result = storage.getThresholdConfig(); + expect(result.length).toBe(1); + expect(result[0].metric).toBe('custom'); + }); +});