diff --git a/CHANGELOG.md b/CHANGELOG.md index 49002a65..f07cec35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [1.4.0] — 2026-02-28 +### Breaking Changes + +- **MQTT `HASS_ATTRIBUTES` default changed from `full` to `short`** — This changes Home Assistant entity payloads by default, excluding large SBOM documents, scan vulnerabilities, details, and labels. To retain the previous payload behavior, set `DD_TRIGGER_MQTT_{name}_HASS_ATTRIBUTES=full` explicitly. + ### Added +- **Audit log for container state changes** — External container lifecycle events (start, stop, restart via Portainer or CLI) now generate `container-update` audit entries with the new status, so the audit log reflects all state changes, not just Drydock-initiated actions. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **mTLS client certificate support** — Registry providers now accept `CLIENTCERT` and `CLIENTKEY` options for mutual TLS authentication with private registries that require client certificates. + #### Backend / Core - **Container recent-status API** — `GET /api/containers/recent-status` returns pre-computed update status (`updated`/`pending`/`failed`) per container, replacing the client-side audit log scan and reducing dashboard fetch payload size. @@ -55,6 +62,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Security vulnerability overview endpoint** — New `GET /api/containers/security/vulnerabilities` returns pre-aggregated vulnerability data grouped by image with severity summaries, so the Security view no longer needs to load all containers. - **MQTT attribute filtering for Home Assistant** — MQTT trigger supports attribute-based filtering for Home Assistant integration, allowing selective publishing based on container attributes. 
- **Docker Compose post_start env validation** — Docker Compose trigger validates environment variables in `post_start` hooks before execution, preventing runtime errors from missing or invalid env var references. +- **MQTT HASS entity_picture from container icons** — When Home Assistant HASS discovery is enabled, `entity_picture` is now automatically resolved from the container's `dd.display.icon` label. Icons with `sh:`, `hl:`, or `si:` prefixes map to jsDelivr CDN URLs for selfhst, homarr-labs, and simple-icons respectively. Direct HTTP/HTTPS URLs pass through unchanged. ([#138](https://github.com/CodesWhat/drydock/issues/138)) +- **`dd.display.picture` container label** — New label to override the MQTT HASS `entity_picture` URL directly. Takes precedence over icon-derived pictures when set to an HTTP/HTTPS URL. #### UI / Dashboard @@ -88,6 +97,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Rollback confirmation dialog** — Container rollback actions now require explicit confirmation through a danger-severity dialog before restoring from backup. - **Update confirmation dialog** — Container update actions now require explicit confirmation through a dialog before triggering an update. - **SHA-1 hash deprecation banner** — Dashboard shows a dismissible deprecation banner when legacy SHA-1 password hashes are detected, prompting migration to argon2id. +- **Config tab URL deep-linking** — Config view tab selection syncs to the URL query parameter, enabling shareable direct links to specific config tabs. ### Changed @@ -119,9 +129,26 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **6 color themes** — Replaced original Drydock theme with popular editor palettes: One Dark, GitHub, Dracula, Catppuccin, Gruvbox, and Ayu. Each with dark and light variants. 
- **Argon2id password hashing** — Basic auth now uses argon2id (OWASP recommended) via Node.js built-in `crypto.argon2Sync()` instead of scrypt for password hashing. Default parameters: 64 MiB memory, 3 passes, parallelism 4. - **PUT `/api/settings` deprecated** — `PUT /api/settings` now returns RFC 9745 `Deprecation` and RFC 8594 `Sunset` headers. Use `PATCH /api/settings` for partial updates. PUT alias removal targeted for v1.5.0. +- **Basic auth argon2id PHC compatibility** — Basic authentication now accepts PHC-format argon2id hashes (`$argon2id$v=19$m=...,t=...,p=...$salt$hash`) in addition to the existing Drydock `argon2id$memory$passes$parallelism$salt$hash` format. Hash-generation guidance now recommends the standard `argon2` CLI command first, with Node.js as a secondary option. +- **Borderless UI redesign** — Removed borders from all views, config tabs, detail panels, and shared data components for a cleaner visual appearance. +- **Dashboard version column alignment** — Version column in the dashboard updates table is now left-aligned for better readability. +- **Detail panel expand button redesigned** — Full-page expand button in the detail panel now uses a frame-corners icon instead of the previous maximize icon. +- **Sidebar active indicator removed** — Removed the blue active indicator bar from sidebar navigation items for a cleaner look. ### Fixed +- **Log level setting had no effect** — `DD_LOG_LEVEL=debug` was correctly parsed but debug messages were silently dropped because pino's multistream destinations defaulted to `info` level. Stream destinations now inherit the configured log level. ([#134](https://github.com/CodesWhat/drydock/issues/134)) +- **Server feature flags not loaded after login** — Feature flags (`containeractions`, `delete`) were permanently stuck as disabled when authentication was required, because the pre-login bootstrap fetch failure marked the flags as "loaded" and never retried. 
Now failed fetches allow automatic retry after login. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger silently skips containers** — Multiple failure paths in the compose trigger were logged at `debug` level, making it nearly impossible to diagnose why a trigger reports success but containers don't update. Key diagnostic messages (compose file mismatch, label inspect failure, no containers matched) promoted to `warn` level, and the "already up to date" message now includes container names. ([#84](https://github.com/CodesWhat/drydock/discussions/84)) +- **Fallback icon cached permanently** — The Docker placeholder icon was served with `immutable` cache headers, causing browsers to cache it permanently even after the real provider icon becomes available. Fallback responses now use `no-store`. +- **Basic auth upgrade compatibility restored** — Basic auth now accepts legacy v1.3.9 Basic auth hashes (`{SHA}`, `$apr1$`/`$1$`, `crypt`, and plain fallback) to preserve smooth upgrades. Legacy formats remain deprecated and continue showing a migration banner, with removal still planned for v1.6.0. +- **Compose trigger rejects lowercase env var keys** — Configuration keys like `COMPOSEFILEONCE`, `DIGESTPINNING`, and `RECONCILIATIONMODE` were lowercased by the env parser but the Joi schema expected camelCase. Schema now maps lowercase keys to their camelCase equivalents. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger strips docker.io prefix** — When a compose file uses an explicit `docker.io/` registry prefix, compose mutations now preserve it instead of stripping it to a bare library path. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger fails when FILE points to directory** — `DD_TRIGGER_DOCKERCOMPOSE_{name}_FILE` now accepts directories, automatically probing for `compose.yaml`, `compose.yml`, `docker-compose.yaml`, or `docker-compose.yml` inside the directory. 
([#84](https://github.com/CodesWhat/drydock/discussions/84)) +- **Container healthcheck fails with TLS backend** — The Dockerfile healthcheck now detects `DD_SERVER_TLS_ENABLED=true` and switches to `curl --insecure https://` for self-signed certificates. Also skips the healthcheck entirely when `DD_SERVER_ENABLED=false`. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Agent CAFILE ignored without CERTFILE** — The agent subsystem now loads the CA certificate from `CAFILE` even when `CERTFILE` is not provided, fixing TLS verification for agents behind reverse proxies with custom CA chains. +- **Service worker accepts cross-origin postMessage** — The demo service worker now validates `postMessage` origins against the current host, preventing potential cross-origin message injection. + - **Action buttons disable and show spinner during in-progress actions** — Container action buttons (Stop, Start, Restart, Update, Delete) now show a disabled state with a spinner while the action runs in the background, providing clear visual feedback. The confirm dialog closes immediately on accept instead of blocking the UI. - **Command palette clears stale filter on navigation** — Navigating to a container via Ctrl+K search now clears the active `filterKind`, preventing stale filter state from hiding the navigated container. - **Manual update button works with compose triggers** — The update container endpoint now searches for both `docker` and `dockercompose` trigger types, matching the existing preview endpoint behavior. Previously, users with only a compose trigger saw "No docker trigger found for this container". @@ -186,6 +213,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Backup retention on failed updates** — Backup entries are now pruned on the failure path, not just after successful updates, preventing indefinite accumulation of stale backups. 
- **Backup pruning with undefined maxCount** — `pruneOldBackups()` no longer deletes all backups when `maxCount` is `undefined` (e.g. when `DD_BACKUPCOUNT` is not configured). Now correctly no-ops on invalid or non-finite values. - **Auto-rollback audit fromVersion accuracy** — Rollback audit entries now correctly record `fromVersion` as the failing new image tag (via `updateKind.remoteValue`) instead of the pre-update old tag. +- **HASS entity_picture URL broken after logo rename** — MQTT HASS discovery payload referenced a renamed logo file (`drydock.png` instead of `whale-logo.png`), causing missing entity pictures in Home Assistant. ([#138](https://github.com/CodesWhat/drydock/issues/138)) +- **Watcher crashes on containers with empty names** — Docker watcher's same-name deduplication filter threw errors when containers had empty or missing names. Now skips deduplication for unnamed containers. +- **Container names not reconciled after external recreate** — Containers recreated externally (via Portainer or `docker compose up`) retained stale names in the store until the next full poll cycle. Now reconciles container names immediately on detection. +- **Nested icon prefixes fail proxy request** — Icon proxy rejected icons with doubled prefixes like `mdi:mdi-docker`. Now normalizes nested prefixes before proxying. +- **Colon-separated icon prefixes rejected** — `dd.display.icon` labels using colon separators (e.g., `sh:nextcloud`) were rejected by the API validation pattern. Validation now accepts colon-prefixed icon identifiers. +- **Bouncer-blocked state missing from container details** — Container detail views didn't reflect bouncer-blocked status. Now correctly wires the blocked state into detail panel display. 
### Security @@ -227,6 +260,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Identity-aware rate limit keying** — Opt-in `DD_SERVER_RATELIMIT_IDENTITYKEYING=true` keys authenticated route rate limits by session/username instead of IP, preventing collisions for multiple users behind shared proxies. Unauthenticated routes remain IP-keyed. Disabled by default. - **Reactive server feature flags in UI** — Container action buttons (update, rollback, scan, triggers) are now gated by server-side feature flags via a `useServerFeatures` composable. When features like `DD_SERVER_FEATURE_CONTAINERACTIONS` are disabled, buttons show a disabled state with tooltip explaining why instead of silently failing at runtime. - **Compose trigger hardening** — Auto compose file detection from container labels (`com.docker.compose.project.config_files`) with Docker inspect fallback, pre-commit `docker compose config --quiet` validation before writes, compose file reconciliation (warn/block modes for runtime vs compose image drift), optional digest pinning (`DIGESTPINNING` trigger config), compose-file-once batch mode for multi-service stacks, multi-file compose chain awareness with deterministic writable target selection, compose metadata in update preview API, and compose file path display in container detail UI. +- **Unsupported hash formats fail closed** — Basic auth now rejects unsupported hash formats instead of falling through to plaintext comparison, preventing accidental plaintext password acceptance. 
### Performance @@ -706,7 +740,6 @@ Remaining upstream-only changes (not ported — not applicable to drydock): | Fix codeberg tests | Covered by drydock's own tests | | Update changelog | Upstream-specific | -[Unreleased]: https://github.com/CodesWhat/drydock/compare/v1.4.0...HEAD [1.4.0]: https://github.com/CodesWhat/drydock/compare/v1.3.9...v1.4.0 [1.3.9]: https://github.com/CodesWhat/drydock/compare/v1.3.8...v1.3.9 [1.3.8]: https://github.com/CodesWhat/drydock/compare/v1.3.7...v1.3.8 diff --git a/Dockerfile b/Dockerfile index de37df74..2f34c401 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ ENV WORKDIR=/home/node/app ENV DD_LOG_FORMAT=text ENV DD_VERSION=$DD_VERSION -HEALTHCHECK --interval=30s --timeout=5s CMD ["sh", "-c", "if [ -z \"$DD_SERVER_ENABLED\" ] || [ \"$DD_SERVER_ENABLED\" = 'true' ]; then curl --fail http://localhost:${DD_SERVER_PORT:-3000}/health || exit 1; else exit 0; fi"] +HEALTHCHECK --interval=30s --timeout=5s CMD ["sh", "-c", "if [ -n \"$DD_SERVER_ENABLED\" ] && [ \"$DD_SERVER_ENABLED\" != 'true' ]; then exit 0; fi; if [ \"$DD_SERVER_TLS_ENABLED\" = 'true' ]; then curl --fail --insecure https://localhost:${DD_SERVER_PORT:-3000}/health || exit 1; else curl --fail http://localhost:${DD_SERVER_PORT:-3000}/health || exit 1; fi"] # Install system packages, trivy, and cosign # hadolint ignore=DL3018,DL3028,DL4006 diff --git a/README.md b/README.md index be47edfa..ebd4545d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@

Version - GHCR pulls + GHCR pulls Docker Hub pulls Quay.io
@@ -52,7 +52,7 @@ - [📖 Documentation](https://drydock.codeswhat.com/docs) - [🚀 Quick Start](#quick-start) -- [📸 Screenshots](#screenshots) +- [📸 Screenshots & Live Demo](#screenshots) - [✨ Features](#features) - [🔌 Supported Integrations](#supported-integrations) - [⚖️ Feature Comparison](#feature-comparison) @@ -61,6 +61,7 @@ - [📖 Documentation](#documentation) - [⭐ Star History](#star-history) - [🔧 Built With](#built-with) +- [🤝 Community QA](#community-qa)


@@ -82,7 +83,7 @@ docker run -d \ > node -e 'const c=require("node:crypto");const s=c.randomBytes(32);const h=c.argon2Sync("argon2id",{message:process.argv[1],nonce:s,memory:65536,passes:3,parallelism:4,tagLength:64});console.log("argon2id$65536$3$4$"+s.toString("base64")+"$"+h.toString("base64"));' "yourpassword" > ``` > -> Legacy `{SHA}` hashes are accepted but deprecated (removed in v1.6.0). MD5/crypt/plain htpasswd hashes are not supported. +> Legacy v1.3.9 Basic auth hashes (`{SHA}`, `$apr1$`/`$1$`, `crypt`, and plain) are accepted for upgrade compatibility but deprecated (removed in v1.6.0). Argon2id is recommended for all new configurations. > Authentication is **required by default**. See the [auth docs](https://drydock.codeswhat.com/docs/configuration/authentications) for OIDC, anonymous access, and other options. > To explicitly allow anonymous access on fresh installs, set `DD_ANONYMOUS_AUTH_CONFIRM=true`. @@ -92,10 +93,8 @@ See the [Quick Start guide](https://drydock.codeswhat.com/docs/quickstart) for D
-

📸 Screenshots

+

📸 Screenshots & Live Demo

-
-Dashboard @@ -106,81 +105,16 @@ See the [Quick Start guide](https://drydock.codeswhat.com/docs/quickstart) for D
LightDashboard Dark
-
-
-Containers - - - - - - - - - -
LightDark
Containers LightContainers Dark
-
+
-
-Container Detail - - - - - - - - - -
LightDark
Container Detail LightContainer Detail Dark
-
+**Why look at screenshots when you can experience it yourself?** -
-Security - - - - - - - - - -
LightDark
Security LightSecurity Dark
-
+Try the Live Demo -
-Login - - - - - - - - - -
LightDark
Login LightLogin Dark
-
+Fully interactive — real UI, mock data, no install required. Runs entirely in-browser. -
-Mobile Responsive - - - - - - - - - - - - - -
Dashboard LightDashboard DarkContainers LightContainers Dark
Mobile Dashboard LightMobile Dashboard DarkMobile Containers LightMobile Containers Dark
-
+

@@ -388,6 +322,7 @@ Drop-in replacement — swap the image, restart, done. All `WUD_*` env vars and | Resource | Link | | --- | --- | | Website | [drydock.codeswhat.com](https://drydock.codeswhat.com/) | +| Live Demo | [demo.drydock.codeswhat.com](https://demo.drydock.codeswhat.com) | | Docs | [drydock.codeswhat.com/docs](https://drydock.codeswhat.com/docs) | | Configuration | [Configuration](https://drydock.codeswhat.com/docs/configuration) | | Quick Start | [Quick Start](https://drydock.codeswhat.com/docs/quickstart) | @@ -431,6 +366,12 @@ Drop-in replacement — swap the image, restart, done. All `WUD_*` env vars and [![Docker](https://img.shields.io/badge/Docker-2496ED?logo=docker&logoColor=fff)](https://www.docker.com/) [![Anthropic](https://img.shields.io/badge/Anthropic-000000?style=flat&logo=anthropic&logoColor=white)](https://claude.ai/) +### Community QA + +Thanks to the users who helped test v1.4.0 release candidates and reported bugs: + +[@RK62](https://github.com/RK62) · [@flederohr](https://github.com/flederohr) · [@rj10rd](https://github.com/rj10rd) · [@larueli](https://github.com/larueli) · [@Waler](https://github.com/Waler) · [@ElVit](https://github.com/ElVit) · [@nchieffo](https://github.com/nchieffo) + --- **[AGPL-3.0 License](LICENSE)** diff --git a/app/agent/AgentClient.test.ts b/app/agent/AgentClient.test.ts index 6ee3c2a4..0122bd32 100644 --- a/app/agent/AgentClient.test.ts +++ b/app/agent/AgentClient.test.ts @@ -122,6 +122,17 @@ describe('AgentClient', () => { expect(c.axiosOptions.httpsAgent).toBeDefined(); }); + test('should create https agent when cafile provided without certfile', () => { + const c = new AgentClient('a', { + host: 'myhost', + port: 4000, + secret: 's', + cafile: '/path/to/ca.pem', + }); + expect(c.baseUrl).toBe('https://myhost:4000'); + expect(c.axiosOptions.httpsAgent).toBeDefined(); + }); + test('should skip cert file read when resolved cert path is empty', () => { mockResolveConfiguredPath.mockImplementation((path, 
options) => { if (options?.label === 'a cert file') { diff --git a/app/agent/AgentClient.ts b/app/agent/AgentClient.ts index 8a352118..c0b410f6 100644 --- a/app/agent/AgentClient.ts +++ b/app/agent/AgentClient.ts @@ -54,7 +54,7 @@ export class AgentClient { let candidateUrl = `${this.config.host}:${port}`; // Add protocol if not present if (!candidateUrl.startsWith('http')) { - const useHttps = Boolean(this.config.certfile) || port === 443; + const useHttps = Boolean(this.config.certfile) || Boolean(this.config.cafile) || port === 443; candidateUrl = `http${useHttps ? 's' : ''}://${candidateUrl}`; } // Validate the URL to prevent request forgery (CodeQL js/request-forgery) @@ -70,17 +70,17 @@ export class AgentClient { }, }; - if (this.config.certfile) { + if (this.config.certfile || this.config.cafile) { const caPath = this.config.cafile ? resolveConfiguredPath(this.config.cafile, { label: `${name} ca file` }) : undefined; - const certPath = resolveConfiguredPath(this.config.certfile, { - label: `${name} cert file`, - }); + const certPath = this.config.certfile + ? resolveConfiguredPath(this.config.certfile, { label: `${name} cert file` }) + : undefined; const keyPath = this.config.keyfile ? resolveConfiguredPath(this.config.keyfile, { label: `${name} key file` }) : undefined; - // Intentional: mTLS with optional self-signed CA for agent communication + // Intentional: custom CA / mTLS for agent communication // lgtm[js/disabling-certificate-validation] this.axiosOptions.httpsAgent = new https.Agent({ ca: caPath ? 
fs.readFileSync(caPath) : undefined, diff --git a/app/api/icons.test.ts b/app/api/icons.test.ts index 42c37123..0b57278c 100644 --- a/app/api/icons.test.ts +++ b/app/api/icons.test.ts @@ -139,11 +139,7 @@ describe('Icons Router', () => { mockRename.mockResolvedValue(undefined); mockUnlink.mockResolvedValue(undefined); mockReaddir.mockResolvedValue([]); - mockStat.mockResolvedValue({ - mtimeMs: Date.now(), - size: 1024, - isFile: () => true, - }); + mockStat.mockRejectedValue(new Error('not found')); }); test('should initialize router with icon and cache routes', () => { @@ -186,7 +182,11 @@ describe('Icons Router', () => { }); test('should serve icon from cache when available', async () => { - mockAccess.mockResolvedValue(undefined); + mockStat.mockResolvedValue({ + mtimeMs: Date.now(), + size: 1024, + isFile: () => true, + }); const handler = getHandler(); const res = createResponse(); @@ -359,7 +359,11 @@ describe('Icons Router', () => { }); test('should skip axios when icon appears in cache after first miss', async () => { - mockAccess.mockRejectedValueOnce(new Error('not found')).mockResolvedValueOnce(undefined); + mockStat.mockRejectedValueOnce(new Error('not found')).mockResolvedValueOnce({ + mtimeMs: Date.now(), + size: 1024, + isFile: () => true, + }); const handler = getHandler(); const res = createResponse(); @@ -527,15 +531,18 @@ describe('Icons Router', () => { mockReaddir .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) .mockResolvedValueOnce(['old.svg', 'docker.svg']); - mockStat.mockImplementation(async (targetPath: string) => { - if (targetPath === '/store/icons/simple/old.svg') { - return { mtimeMs: Date.now() - 1_000, size: 150 * 1024 * 1024, isFile: () => true }; - } - if (targetPath === '/store/icons/simple/docker.svg') { - return { mtimeMs: Date.now(), size: 50 * 1024 * 1024, isFile: () => true }; - } - return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - }); + mockStat + .mockRejectedValueOnce(new Error('not 
found')) + .mockRejectedValueOnce(new Error('not found')) + .mockImplementation(async (targetPath: string) => { + if (targetPath === '/store/icons/simple/old.svg') { + return { mtimeMs: Date.now() - 1_000, size: 150 * 1024 * 1024, isFile: () => true }; + } + if (targetPath === '/store/icons/simple/docker.svg') { + return { mtimeMs: Date.now(), size: 50 * 1024 * 1024, isFile: () => true }; + } + return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; + }); const handler = getHandler(); const res = createResponse(); @@ -573,23 +580,26 @@ describe('Icons Router', () => { }, ); - mockStat.mockImplementation((targetPath: string) => { - if (targetPath === '/store/icons/simple/old.svg') { - return oldStatPromise; - } - if (targetPath === '/store/icons/simple/docker.svg') { + mockStat + .mockRejectedValueOnce(new Error('not found')) + .mockRejectedValueOnce(new Error('not found')) + .mockImplementation((targetPath: string) => { + if (targetPath === '/store/icons/simple/old.svg') { + return oldStatPromise; + } + if (targetPath === '/store/icons/simple/docker.svg') { + return Promise.resolve({ + mtimeMs: Date.now(), + size: 50 * 1024 * 1024, + isFile: () => true, + }); + } return Promise.resolve({ mtimeMs: Date.now(), - size: 50 * 1024 * 1024, + size: 1024, isFile: () => true, }); - } - return Promise.resolve({ - mtimeMs: Date.now(), - size: 1024, - isFile: () => true, }); - }); const handler = getHandler(); const res = createResponse(); @@ -603,9 +613,16 @@ describe('Icons Router', () => { res, ); - await vi.waitFor(() => expect(mockStat).toHaveBeenCalled()); + await vi.waitFor(() => { + const statTargets = mockStat.mock.calls.map((call) => call[0]); + expect(statTargets).toEqual( + expect.arrayContaining(['/store/icons/simple/old.svg', '/store/icons/simple/docker.svg']), + ); + }); try { - expect(mockStat).toHaveBeenCalledTimes(2); + // The exact call count depends on cache-hit checks before fetch. 
What matters + // here is that enforcement stats both entries without waiting for old.svg first. + expect(mockStat).toHaveBeenCalled(); } finally { resolveOldStat?.({ mtimeMs: Date.now() - 1_000, @@ -715,11 +732,14 @@ describe('Icons Router', () => { mockReaddir .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) .mockResolvedValueOnce(['docker.svg']); - mockStat.mockResolvedValue({ - mtimeMs: Date.now(), - size: 1024, - isFile: () => true, - }); + mockStat + .mockRejectedValueOnce(new Error('not found')) + .mockRejectedValueOnce(new Error('not found')) + .mockResolvedValue({ + mtimeMs: Date.now(), + size: 1024, + isFile: () => true, + }); const handler = getHandler(); const res1 = createResponse(); const res2 = createResponse(); @@ -767,18 +787,21 @@ describe('Icons Router', () => { { name: 'simple', isDirectory: () => true }, ]) .mockResolvedValueOnce(['stale.svg', 'nested', 'docker.svg']); - mockStat.mockImplementation(async (targetPath: string) => { - if (targetPath === '/store/icons/simple/stale.svg') { - return { mtimeMs: 0, size: 1024, isFile: () => true }; - } - if (targetPath === '/store/icons/simple/nested') { - return { mtimeMs: Date.now(), size: 0, isFile: () => false }; - } - if (targetPath === '/store/icons/simple/docker.svg') { + mockStat + .mockRejectedValueOnce(new Error('not found')) + .mockRejectedValueOnce(new Error('not found')) + .mockImplementation(async (targetPath: string) => { + if (targetPath === '/store/icons/simple/stale.svg') { + return { mtimeMs: 0, size: 1024, isFile: () => true }; + } + if (targetPath === '/store/icons/simple/nested') { + return { mtimeMs: Date.now(), size: 0, isFile: () => false }; + } + if (targetPath === '/store/icons/simple/docker.svg') { + return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; + } return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - } - return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - }); + }); const handler = getHandler(); const res = 
createResponse(); @@ -888,15 +911,18 @@ describe('Icons Router', () => { mockReaddir .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) .mockResolvedValueOnce(['stale.svg', 'docker.svg']); - mockStat.mockImplementation(async (targetPath: string) => { - if (targetPath === '/store/icons/simple/stale.svg') { - return { mtimeMs: 0, size: 1024, isFile: () => true }; - } - if (targetPath === '/store/icons/simple/docker.svg') { + mockStat + .mockRejectedValueOnce(new Error('not found')) + .mockRejectedValueOnce(new Error('not found')) + .mockImplementation(async (targetPath: string) => { + if (targetPath === '/store/icons/simple/stale.svg') { + return { mtimeMs: 0, size: 1024, isFile: () => true }; + } + if (targetPath === '/store/icons/simple/docker.svg') { + return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; + } return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - } - return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - }); + }); mockUnlink.mockRejectedValue(new Error('permission denied')); const handler = getHandler(); const res = createResponse(); @@ -925,15 +951,18 @@ describe('Icons Router', () => { mockReaddir .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) .mockResolvedValueOnce(['old.svg', 'docker.svg']); - mockStat.mockImplementation(async (targetPath: string) => { - if (targetPath === '/store/icons/simple/old.svg') { - return { mtimeMs: Date.now() - 1_000, size: 150 * 1024 * 1024, isFile: () => true }; - } - if (targetPath === '/store/icons/simple/docker.svg') { - return { mtimeMs: Date.now(), size: 50 * 1024 * 1024, isFile: () => true }; - } - return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; - }); + mockStat + .mockRejectedValueOnce(new Error('not found')) + .mockRejectedValueOnce(new Error('not found')) + .mockImplementation(async (targetPath: string) => { + if (targetPath === '/store/icons/simple/old.svg') { + return { mtimeMs: Date.now() - 1_000, size: 150 * 1024 
* 1024, isFile: () => true }; + } + if (targetPath === '/store/icons/simple/docker.svg') { + return { mtimeMs: Date.now(), size: 50 * 1024 * 1024, isFile: () => true }; + } + return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; + }); mockUnlink.mockImplementation(async (targetPath: string) => { if (targetPath === '/store/icons/simple/old.svg') { throw new Error('unlink failed'); @@ -1076,13 +1105,78 @@ describe('Icons Router', () => { expect(res.status).not.toHaveBeenCalledWith(404); expect(res.json).not.toHaveBeenCalled(); - expect(res.set).toHaveBeenCalledWith('Cache-Control', 'public, max-age=31536000, immutable'); + expect(res.set).toHaveBeenCalledWith('Cache-Control', 'no-store'); expect(res.type).toHaveBeenCalledWith('image/png'); expect(res.sendFile).toHaveBeenCalledWith('docker.png', { root: '/runtime/assets/icons/selfhst', }); }); + test('should use no-store cache headers for fallback images instead of immutable', async () => { + const upstreamError = Object.assign(new Error('not found'), { + response: { status: 404 }, + }); + mockAccess.mockImplementation(async (targetPath: string) => { + if (targetPath === '/runtime/assets/icons/selfhst/docker.png') { + return; + } + throw new Error('not found'); + }); + mockAxiosGet.mockRejectedValue(upstreamError); + mockAxiosIsAxiosError.mockReturnValue(true); + const handler = getHandler(); + const res = createResponse(); + + await handler( + { + params: { + provider: 'homarr', + slug: 'missing', + }, + headers: { + 'sec-fetch-dest': 'image', + }, + }, + res, + ); + + expect(res.set).toHaveBeenCalledWith('Cache-Control', 'no-store'); + expect(res.set).not.toHaveBeenCalledWith( + 'Cache-Control', + 'public, max-age=31536000, immutable', + ); + expect(res.type).toHaveBeenCalledWith('image/png'); + expect(res.sendFile).toHaveBeenCalledWith('docker.png', { + root: '/runtime/assets/icons/selfhst', + }); + }); + + test('should use immutable cache headers for successfully cached icons', async () => { + 
mockStat.mockResolvedValue({ + mtimeMs: Date.now(), + size: 1024, + isFile: () => true, + }); + const handler = getHandler(); + const res = createResponse(); + + await handler( + { + params: { + provider: 'homarr', + slug: 'docker', + }, + }, + res, + ); + + expect(res.set).toHaveBeenCalledWith('Cache-Control', 'public, max-age=31536000, immutable'); + expect(res.type).toHaveBeenCalledWith('image/png'); + expect(res.sendFile).toHaveBeenCalledWith('docker.png', { + root: '/store/icons/homarr', + }); + }); + test('should serve bundled fallback image when sec-fetch-dest header is an array', async () => { const upstreamError = Object.assign(new Error('forbidden'), { response: { status: 403 }, diff --git a/app/api/icons/response.test.ts b/app/api/icons/response.test.ts index 9ea370f8..e9c36001 100644 --- a/app/api/icons/response.test.ts +++ b/app/api/icons/response.test.ts @@ -51,6 +51,7 @@ describe('icons/response', () => { }); expect(mockFindBundledIconPath).toHaveBeenCalledWith('selfhst', 'docker', 'png'); + expect(res.set).toHaveBeenCalledWith('Cache-Control', 'no-store'); expect(res.status).not.toHaveBeenCalled(); expect(res.json).not.toHaveBeenCalled(); expect(res.sendFile).toHaveBeenCalledWith('docker.png', { diff --git a/app/api/icons/response.ts b/app/api/icons/response.ts index 4f519b3d..e6089dcd 100644 --- a/app/api/icons/response.ts +++ b/app/api/icons/response.ts @@ -3,6 +3,7 @@ import type { Request, Response } from 'express'; import { providers } from './providers.js'; import { CACHE_CONTROL_HEADER, + FALLBACK_CACHE_CONTROL_HEADER, FALLBACK_ICON, FALLBACK_IMAGE_PROVIDER, FALLBACK_IMAGE_SLUG, @@ -50,7 +51,9 @@ async function sendMissingIconResponse({ providers[FALLBACK_IMAGE_PROVIDER].extension, ); if (fallbackPath) { - sendCachedIcon(res, fallbackPath, providers[FALLBACK_IMAGE_PROVIDER].contentType); + res.set('Cache-Control', FALLBACK_CACHE_CONTROL_HEADER); + res.type(providers[FALLBACK_IMAGE_PROVIDER].contentType); + 
res.sendFile(path.basename(fallbackPath), { root: path.dirname(fallbackPath) }); return; } } diff --git a/app/api/icons/settings.ts b/app/api/icons/settings.ts index 07aaffe8..601794e0 100644 --- a/app/api/icons/settings.ts +++ b/app/api/icons/settings.ts @@ -1,6 +1,7 @@ import { toPositiveInteger } from '../../util/parse.js'; const CACHE_CONTROL_HEADER = 'public, max-age=31536000, immutable'; +const FALLBACK_CACHE_CONTROL_HEADER = 'no-store'; const FALLBACK_ICON = 'fab fa-docker'; const FALLBACK_IMAGE_PROVIDER = 'selfhst'; const FALLBACK_IMAGE_SLUG = 'docker'; @@ -48,6 +49,7 @@ function getIconInFlightTimeoutMs() { export { CACHE_CONTROL_HEADER, + FALLBACK_CACHE_CONTROL_HEADER, FALLBACK_ICON, FALLBACK_IMAGE_PROVIDER, FALLBACK_IMAGE_SLUG, diff --git a/app/api/icons/storage.test.ts b/app/api/icons/storage.test.ts index 45fd24ba..683ac96f 100644 --- a/app/api/icons/storage.test.ts +++ b/app/api/icons/storage.test.ts @@ -81,6 +81,20 @@ describe('icons/storage', () => { expect(mockUnlink).toHaveBeenCalledWith('/store/icons/simple/stale.svg'); }); + test('checks cached icon usability via stat without pre-access syscall', async () => { + mockStat.mockResolvedValue({ + mtimeMs: Date.now(), + size: 1024, + isFile: () => true, + }); + + const usable = await isCachedIconUsable('/store/icons/simple/fresh.svg'); + + expect(usable).toBe(true); + expect(mockStat).toHaveBeenCalledWith('/store/icons/simple/fresh.svg'); + expect(mockAccess).not.toHaveBeenCalled(); + }); + test('evicts oldest cache entry when byte budget is exceeded', async () => { const nowSpy = vi.spyOn(Date, 'now'); nowSpy.mockReturnValue(2_000_000_000_000); @@ -133,6 +147,43 @@ describe('icons/storage', () => { expect(mockUnlink).not.toHaveBeenCalledWith('/store/icons/simple/protected.svg'); }); + test('ignores cache entries that fail stat between directory scan and stat call', async () => { + mockReaddir + .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) + 
.mockResolvedValueOnce(['vanished.svg', 'fresh.svg']); + mockStat.mockImplementation(async (targetPath: string) => { + if (targetPath === '/store/icons/simple/vanished.svg') { + throw new Error('ENOENT'); + } + return { mtimeMs: Date.now(), size: 1024, isFile: () => true }; + }); + + await enforceIconCacheLimits(); + + expect(mockUnlink).not.toHaveBeenCalledWith('/store/icons/simple/vanished.svg'); + }); + + test('keeps protected cache entry when no other eviction candidate is available', async () => { + const nowSpy = vi.spyOn(Date, 'now'); + nowSpy.mockReturnValue(2_000_000_000_000); + mockReaddir + .mockResolvedValueOnce([{ name: 'simple', isDirectory: () => true }]) + .mockResolvedValueOnce(['protected.svg']); + mockStat.mockResolvedValue({ + mtimeMs: Date.now() - 1_000, + size: 150 * 1024 * 1024, + isFile: () => true, + }); + + try { + await enforceIconCacheLimits({ protectedPath: '/store/icons/simple/protected.svg' }); + } finally { + nowSpy.mockRestore(); + } + + expect(mockUnlink).not.toHaveBeenCalledWith('/store/icons/simple/protected.svg'); + }); + test('writes icons atomically through a tmp file', async () => { await writeIconAtomically('/store/icons/simple/docker.svg', Buffer.from('')); diff --git a/app/api/icons/storage.ts b/app/api/icons/storage.ts index 2adea64c..db107094 100644 --- a/app/api/icons/storage.ts +++ b/app/api/icons/storage.ts @@ -68,10 +68,6 @@ async function findBundledIconPath(provider: string, slug: string, extension: st } async function isCachedIconUsable(iconPath: string) { - if (!(await iconExists(iconPath))) { - return false; - } - try { const iconStats = await fs.stat(iconPath); if (!iconStats.isFile()) { diff --git a/app/authentications/providers/basic/Basic.test.ts b/app/authentications/providers/basic/Basic.test.ts index eca1d60e..d8c1aa7e 100644 --- a/app/authentications/providers/basic/Basic.test.ts +++ b/app/authentications/providers/basic/Basic.test.ts @@ -29,14 +29,51 @@ vi.mock('node:crypto', async () => { import { 
argon2Sync, createHash, randomBytes } from 'node:crypto'; import Basic from './Basic.js'; -function createArgon2Hash( +type Argon2Params = { memory: number; passes: number; parallelism: number }; +type PhcParamKey = 'm' | 't' | 'p'; + +const DEFAULT_ARGON2_PARAMS: Argon2Params = { + memory: 65536, + passes: 3, + parallelism: 4, +}; + +function createArgon2Hash(password: string, params: Argon2Params = DEFAULT_ARGON2_PARAMS) { + const salt = randomBytes(32); + const derived = argon2Sync('argon2id', { + message: password, + nonce: salt, + memory: params.memory, + passes: params.passes, + parallelism: params.parallelism, + tagLength: 64, + }); + return `argon2id$${params.memory}$${params.passes}$${params.parallelism}$${salt.toString('base64')}$${derived.toString('base64')}`; +} + +function toPhcBase64(value: Buffer, padded = false): string { + const encoded = value.toString('base64').replaceAll('+', '-').replaceAll('/', '_'); + return padded ? encoded : encoded.replace(/=+$/u, ''); +} + +function createPhcArgon2Hash( password: string, - params: { memory: number; passes: number; parallelism: number } = { - memory: 65536, - passes: 3, - parallelism: 4, - }, + options: { + params?: Argon2Params; + version?: string; + parameterOrder?: PhcParamKey[]; + paddedSegments?: boolean; + } = {}, ) { + const params = options.params ?? DEFAULT_ARGON2_PARAMS; + const version = options.version ?? 'v=19'; + const parameterOrder = options.parameterOrder ?? 
['m', 't', 'p']; + const paramValueByKey: Record = { + m: params.memory, + t: params.passes, + p: params.parallelism, + }; + const parameterSegment = parameterOrder.map((key) => `${key}=${paramValueByKey[key]}`).join(','); const salt = randomBytes(32); const derived = argon2Sync('argon2id', { message: password, @@ -46,7 +83,8 @@ function createArgon2Hash( parallelism: params.parallelism, tagLength: 64, }); - return `argon2id$${params.memory}$${params.passes}$${params.parallelism}$${salt.toString('base64')}$${derived.toString('base64')}`; + + return `$argon2id$${version}$${parameterSegment}$${toPhcBase64(salt, options.paddedSegments)}$${toPhcBase64(derived, options.paddedSegments)}`; } function createShaHash(password: string) { @@ -56,6 +94,13 @@ function createShaHash(password: string) { const VALID_SALT_BASE64 = Buffer.alloc(16, 1).toString('base64'); const VALID_HASH_BASE64 = Buffer.alloc(32, 1).toString('base64'); +const VALID_SALT_BASE64URL = toPhcBase64(Buffer.alloc(16, 1)); +const VALID_HASH_BASE64URL = toPhcBase64(Buffer.alloc(32, 1)); +const LEGACY_APR1_HASH = '$apr1$r31.....$HqJZimcKQFAMYayBlzkrA/'; +const LEGACY_MD5_HASH = '$1$saltsalt$2vnaRpHa6Jxjz5n83ok8Z0'; +const LEGACY_CRYPT_HASH = 'rqXexS6ZhobKA'; +const LEGACY_PLAIN_HASH = 'plaintext-password'; +const UNSUPPORTED_BCRYPT_HASH = '$2b$10$123456789012345678901u8Q4W2nLw8Qm7w7fA9sQ3lV7qVQX0w2.'; describe('Basic Authentication', () => { let basic: InstanceType; @@ -274,6 +319,63 @@ describe('Basic Authentication', () => { }); }); + test('should validate configuration schema with PHC argon2id hash', async () => { + const hash = createPhcArgon2Hash('password'); + expect( + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toEqual({ + user: 'testuser', + hash, + }); + }); + + test('should authenticate valid user with PHC argon2id hash', async () => { + basic.configuration = { + user: 'testuser', + hash: createPhcArgon2Hash('password'), + }; + + await new Promise((resolve) => { + 
basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toEqual({ username: 'testuser' }); + resolve(); + }); + }); + }); + + test.each([ + ['m=65536,t=3,p=4'], + ['t=3,p=4,m=65536'], + ['p=4,m=65536,t=3'], + ])('should accept PHC argon2id hashes with reordered parameters (%s)', (parameterSegment) => { + const hash = `$argon2id$v=19$${parameterSegment}$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect( + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toEqual({ + user: 'testuser', + hash, + }); + }); + + test('should accept PHC argon2id hashes with padded base64url segments', async () => { + const hash = createPhcArgon2Hash('password', { paddedSegments: true }); + expect( + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toEqual({ + user: 'testuser', + hash, + }); + }); + test('should throw on invalid configuration', async () => { expect(() => basic.validateConfiguration({})).toThrow('"user" is required'); }); @@ -393,7 +495,40 @@ describe('Basic Authentication', () => { ).toThrow('must be an argon2id hash'); }); - describe('SHA-1 legacy hash support', () => { + test('should reject PHC argon2id hashes missing version segment', async () => { + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash: `$argon2id$m=65536,t=3,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC argon2id hashes with wrong version', async () => { + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash: `$argon2id$v=18$m=65536,t=3,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should not treat malformed PHC argon2id hash as plain fallback during authentication', async () => { + const malformedPhcHash = `$argon2id$v=18$m=65536,t=3,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + basic.configuration = { + user: 'testuser', + hash: 
malformedPhcHash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', malformedPhcHash, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + describe('legacy v1.3.9 hash support', () => { test('should accept SHA-1 hash in configuration schema', async () => { const hash = createShaHash('password'); expect( @@ -481,23 +616,54 @@ describe('Basic Authentication', () => { }); }); - test('should reject SHA-1 hash with invalid digest length', async () => { + test('should accept SHA-1 hash with invalid digest length in schema but reject authentication', async () => { const shortDigest = Buffer.alloc(10, 1).toString('base64'); - expect(() => + + expect( basic.validateConfiguration({ user: 'testuser', hash: `{SHA}${shortDigest}`, }), - ).toThrow('must be an argon2id hash'); + ).toEqual({ + user: 'testuser', + hash: `{SHA}${shortDigest}`, + }); + + basic.configuration = { + user: 'testuser', + hash: `{SHA}${shortDigest}`, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); }); - test('should reject SHA-1 hash with malformed base64', async () => { - expect(() => + test('should accept SHA-1 hash with malformed base64 in schema but reject authentication', async () => { + expect( basic.validateConfiguration({ user: 'testuser', hash: '{SHA}not*valid*base64', }), - ).toThrow('must be an argon2id hash'); + ).toEqual({ + user: 'testuser', + hash: '{SHA}not*valid*base64', + }); + + basic.configuration = { + user: 'testuser', + hash: '{SHA}not*valid*base64', + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); }); test('should reject when SHA hash parsing fails during verification', async () => { @@ -560,64 +726,863 @@ describe('Basic Authentication', () => { createHashSpy.mockRestore(); }); - test('should reject 
unrecognized hash formats', async () => { + test('should accept APR1 hash in configuration schema', async () => { + expect( + basic.validateConfiguration({ + user: 'testuser', + hash: LEGACY_APR1_HASH, + }), + ).toEqual({ + user: 'testuser', + hash: LEGACY_APR1_HASH, + }); + }); + + test('should authenticate valid user with APR1 hash', async () => { + basic.configuration = { + user: 'testuser', + hash: LEGACY_APR1_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'myPassword', (_err, result) => { + expect(result).toEqual({ username: 'testuser' }); + resolve(); + }); + }); + }); + + test('should reject invalid password with APR1 hash', async () => { basic.configuration = { user: 'testuser', - hash: 'plaintext-password', + hash: LEGACY_APR1_HASH, }; await new Promise((resolve) => { - basic.authenticate('testuser', 'plaintext-password', (err, result) => { + basic.authenticate('testuser', 'wrongpassword', (_err, result) => { expect(result).toBe(false); resolve(); }); }); }); - }); - describe('getMetadata', () => { - test('should return usesLegacyHash: false for argon2id hash', () => { + test('should accept $1$ MD5 hash in configuration schema', async () => { + expect( + basic.validateConfiguration({ + user: 'testuser', + hash: LEGACY_MD5_HASH, + }), + ).toEqual({ + user: 'testuser', + hash: LEGACY_MD5_HASH, + }); + }); + + test('should authenticate valid user with $1$ MD5 hash', async () => { basic.configuration = { user: 'testuser', - hash: createArgon2Hash('password'), + hash: LEGACY_MD5_HASH, }; - expect(basic.getMetadata()).toEqual({ usesLegacyHash: false }); + + await new Promise((resolve) => { + basic.authenticate('testuser', 'myPassword', (_err, result) => { + expect(result).toEqual({ username: 'testuser' }); + resolve(); + }); + }); }); - test('should return usesLegacyHash: true for SHA-1 hash', () => { + test('should reject invalid password with $1$ MD5 hash', async () => { basic.configuration = { user: 'testuser', - hash: 
createShaHash('password'), + hash: LEGACY_MD5_HASH, }; - expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + await new Promise((resolve) => { + basic.authenticate('testuser', 'wrongpassword', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); }); - }); - describe('initAuthentication', () => { - test('should log deprecation warning when SHA-1 hash is registered', () => { - const warnFn = vi.fn(); - basic.log = { warn: warnFn, info: vi.fn(), debug: vi.fn(), error: vi.fn() } as any; + test('should accept crypt hash in configuration schema', async () => { + expect( + basic.validateConfiguration({ + user: 'testuser', + hash: LEGACY_CRYPT_HASH, + }), + ).toEqual({ + user: 'testuser', + hash: LEGACY_CRYPT_HASH, + }); + }); + + test('should authenticate valid user with crypt hash', async () => { basic.configuration = { user: 'testuser', - hash: createShaHash('password'), + hash: LEGACY_CRYPT_HASH, }; - basic.initAuthentication(); + await new Promise((resolve) => { + basic.authenticate('testuser', 'myPassword', (_err, result) => { + expect(result).toEqual({ username: 'testuser' }); + resolve(); + }); + }); + }); + + test('should reject invalid password with crypt hash', async () => { + basic.configuration = { + user: 'testuser', + hash: LEGACY_CRYPT_HASH, + }; - expect(warnFn).toHaveBeenCalledWith(expect.stringContaining('SHA-1 password hash detected')); + await new Promise((resolve) => { + basic.authenticate('testuser', 'wrongpassword', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); }); - test('should not log warning when argon2id hash is registered', () => { - const warnFn = vi.fn(); - basic.log = { warn: warnFn, info: vi.fn(), debug: vi.fn(), error: vi.fn() } as any; + test('should accept plain hash fallback in configuration schema', async () => { + expect( + basic.validateConfiguration({ + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + }), + ).toEqual({ + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + 
}); + }); + + test('should authenticate valid user with plain hash fallback', async () => { basic.configuration = { user: 'testuser', - hash: createArgon2Hash('password'), + hash: LEGACY_PLAIN_HASH, }; - basic.initAuthentication(); + await new Promise((resolve) => { + basic.authenticate('testuser', LEGACY_PLAIN_HASH, (_err, result) => { + expect(result).toEqual({ username: 'testuser' }); + resolve(); + }); + }); + }); - expect(warnFn).not.toHaveBeenCalled(); + test('should reject invalid password with plain hash fallback', async () => { + basic.configuration = { + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'wrongpassword', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject bcrypt-style hash in configuration schema', async () => { + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash: UNSUPPORTED_BCRYPT_HASH, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should not treat bcrypt-style hash as plain fallback during authentication', async () => { + basic.configuration = { + user: 'testuser', + hash: UNSUPPORTED_BCRYPT_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', UNSUPPORTED_BCRYPT_HASH, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should classify md5, crypt, plain and unsupported hashes in metadata', () => { + basic.configuration = { + user: 'testuser', + hash: LEGACY_MD5_HASH, + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + basic.configuration = { + user: 'testuser', + hash: LEGACY_CRYPT_HASH, + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + basic.configuration = { + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + basic.configuration = { + user: 'testuser', + hash: UNSUPPORTED_BCRYPT_HASH, + }; + 
expect(basic.getMetadata()).toEqual({ usesLegacyHash: false }); + }); + + test('should treat malformed SHA/APR1 prefixes as plain legacy metadata', () => { + basic.configuration = { + user: 'testuser', + hash: '{SHA}', + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + basic.configuration = { + user: 'testuser', + hash: '$apr1$', + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + + basic.configuration = { + user: 'testuser', + hash: '$apr1$$broken', + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + }); + + test('should reject authentication when argon2 hash cannot be parsed during verification', async () => { + const validArgon2Parts = createArgon2Hash('password').split('$'); + let splitCallCount = 0; + const flakyArgon2Hash = { + trim() { + return this as unknown as string; + }, + split(_separator: string) { + splitCallCount += 1; + return splitCallCount === 1 ? validArgon2Parts : ['argon2id']; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyArgon2Hash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when SHA hash becomes invalid during verification', async () => { + const validShaHash = createShaHash('password'); + let substringCallCount = 0; + const flakyShaHash = { + trim() { + return this as unknown as string; + }, + split() { + return ['not-argon2']; + }, + get length() { + return validShaHash.length; + }, + substring(start: number, end?: number) { + if (start === 0 && end === 5) { + return '{SHA}'; + } + substringCallCount += 1; + return substringCallCount === 1 ? 
validShaHash.substring(5) : ''; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyShaHash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when MD5 hash becomes invalid during verification', async () => { + let splitCallCount = 0; + const flakyMd5Hash = { + trim() { + return this as unknown as string; + }, + split() { + splitCallCount += 1; + if (splitCallCount === 1) { + return ['not-argon2']; + } + if (splitCallCount === 2) { + return LEGACY_MD5_HASH.split('$'); + } + return ['', '1']; + }, + get length() { + return 4; + }, + startsWith(prefix: string) { + return prefix === '$1$'; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyMd5Hash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when APR1/MD5 verification throws', async () => { + const throwingPassword = { + [Symbol.toPrimitive]() { + throw new Error('password coercion failed'); + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: LEGACY_MD5_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', throwingPassword, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when crypt hash becomes invalid during verification', async () => { + let lengthReadCount = 0; + const flakyCryptHash = { + trim() { + return this as unknown as string; + }, + split() { + return ['not-argon2']; + }, + get length() { + lengthReadCount += 1; + return lengthReadCount === 3 ? 
12 : 13; + }, + substring(start: number, end?: number) { + if (start === 0 && end === 5) { + return 'crypt'; + } + return LEGACY_CRYPT_HASH.substring(start, end); + }, + startsWith() { + return false; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyCryptHash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when crypt verification throws', async () => { + const throwingPassword = new Proxy( + {}, + { + get() { + throw new Error('password coercion failed'); + }, + }, + ) as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: LEGACY_CRYPT_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', throwingPassword, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when plain comparison coercion throws', async () => { + const throwingPassword = { + [Symbol.toPrimitive]() { + throw new Error('password coercion failed'); + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', throwingPassword, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject authentication when timingSafeEqual throws during password comparison', async () => { + mockTimingSafeEqual + .mockImplementationOnce( + (left: Buffer, right: Buffer) => left.length === right.length && left.equals(right), + ) + .mockImplementationOnce(() => { + throw new Error('timingSafeEqual failed'); + }); + + basic.configuration = { + user: 'testuser', + hash: LEGACY_PLAIN_HASH, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', LEGACY_PLAIN_HASH, (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + 
}); + }); + }); + + describe('getMetadata', () => { + test('should return usesLegacyHash: false for argon2id hash', () => { + basic.configuration = { + user: 'testuser', + hash: createArgon2Hash('password'), + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: false }); + }); + + test('should return usesLegacyHash: true for SHA-1 hash', () => { + basic.configuration = { + user: 'testuser', + hash: createShaHash('password'), + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + }); + + test('should return usesLegacyHash: true for APR1 hash', () => { + basic.configuration = { + user: 'testuser', + hash: LEGACY_APR1_HASH, + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: true }); + }); + }); + + describe('initAuthentication', () => { + test('should log deprecation warning when SHA-1 hash is registered', () => { + const warnFn = vi.fn(); + basic.log = { warn: warnFn, info: vi.fn(), debug: vi.fn(), error: vi.fn() } as any; + basic.configuration = { + user: 'testuser', + hash: createShaHash('password'), + }; + + basic.initAuthentication(); + + expect(warnFn).toHaveBeenCalledWith( + expect.stringContaining('Legacy password hash format detected (sha1)'), + ); + }); + + test('should log deprecation warning when APR1 hash is registered', () => { + const warnFn = vi.fn(); + basic.log = { warn: warnFn, info: vi.fn(), debug: vi.fn(), error: vi.fn() } as any; + basic.configuration = { + user: 'testuser', + hash: LEGACY_APR1_HASH, + }; + + basic.initAuthentication(); + + expect(warnFn).toHaveBeenCalledWith( + expect.stringContaining('Legacy password hash format detected (apr1)'), + ); + }); + + test('should not log warning when argon2id hash is registered', () => { + const warnFn = vi.fn(); + basic.log = { warn: warnFn, info: vi.fn(), debug: vi.fn(), error: vi.fn() } as any; + basic.configuration = { + user: 'testuser', + hash: createArgon2Hash('password'), + }; + + basic.initAuthentication(); + + expect(warnFn).not.toHaveBeenCalled(); + }); + 
}); + + describe('decodeBase64 edge cases', () => { + test('should reject base64 with padding not at proper boundary (length % 4 !== 0)', () => { + // "abcde=" passes the regex but has length 6 (6 % 4 !== 0) — triggers line 77 + const hash = `$argon2id$v=19$m=65536,t=3,p=4$abcde=$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject base64 with length % 4 === 1 and no padding', () => { + // A 5-char base64url string with no padding: length % 4 === 1 — triggers line 80 + const hash = `$argon2id$v=19$m=65536,t=3,p=4$abcde$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject base64 that decodes to empty buffer', () => { + // Line 89: decodeBase64 returns undefined when decoded.length === 0. + // This is a defensive check — valid base64 chars always decode to >=1 byte. + // To reach this branch, temporarily mock Buffer.from to return an empty buffer + // for the specific padded base64 call while preserving normal behavior elsewhere. + const originalFrom = Buffer.from.bind(Buffer); + const spy = vi.spyOn(Buffer, 'from').mockImplementation((...args: unknown[]) => { + // Intercept the base64 decode of the salt segment "AAAA" + if (args[0] === 'AAAA' && args[1] === 'base64') { + spy.mockRestore(); + return Buffer.alloc(0); + } + return (originalFrom as (...a: unknown[]) => Buffer)(...args); + }); + + // "AAAA" is a valid 4-char base64 string (length % 4 === 0, no padding needed). + // Normally decodes to 3 bytes, but our mock returns empty buffer -> line 89. 
+ const hash = `argon2id$65536$3$4$AAAA$${VALID_HASH_BASE64}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + }); + + describe('parsePhcArgon2Parameters rejection branches', () => { + test('should reject PHC hash with wrong parameter count (only 2 entries)', () => { + const hash = `$argon2id$v=19$m=65536,t=3$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with malformed key=value entry (missing value)', () => { + const hash = `$argon2id$v=19$m=65536,t,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with malformed key=value entry (extra equals)', () => { + const hash = `$argon2id$v=19$m=65536,t=3=x,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with duplicate m key', () => { + const hash = `$argon2id$v=19$m=65536,m=65536,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with duplicate t key', () => { + const hash = `$argon2id$v=19$m=65536,t=3,t=3$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with duplicate p key', () => { + const hash = `$argon2id$v=19$p=4,t=3,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 
'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with unknown parameter key', () => { + const hash = `$argon2id$v=19$m=65536,t=3,x=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with missing required parameter after loop', () => { + // 3 entries, unique keys, but one required key is missing (no 'p', has unknown 'x') + // Wait — unknown key returns immediately at line 159. For missing-after-loop (line 164), + // we need 3 entries, all with valid keys (m/t/p), but one key is duplicated — that + // triggers the duplicate check first. Actually, line 164 fires when rawMemory, rawPasses, + // or rawParallelism is still undefined after the loop. This can happen if a key has an + // empty value (value is "" which is not undefined). Let's construct: + // "m=,t=3,p=4" — m has empty value, rawMemory = "", the loop completes, then + // !rawMemory (empty string is falsy) triggers line 164. 
+ const hash = `$argon2id$v=19$m=,t=3,p=4$${VALID_SALT_BASE64URL}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + }); + + describe('parsePhcArgon2Hash salt/hash too short', () => { + test('should reject PHC hash with salt shorter than MIN_SALT_SIZE', () => { + // 8-byte salt (needs 16 minimum) + const shortSalt = toPhcBase64(Buffer.alloc(8, 1)); + const hash = `$argon2id$v=19$m=65536,t=3,p=4$${shortSalt}$${VALID_HASH_BASE64URL}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + + test('should reject PHC hash with hash shorter than MIN_HASH_SIZE', () => { + // 16-byte hash (needs 32 minimum) + const shortHash = toPhcBase64(Buffer.alloc(16, 1)); + const hash = `$argon2id$v=19$m=65536,t=3,p=4$${VALID_SALT_BASE64URL}$${shortHash}`; + expect(() => + basic.validateConfiguration({ + user: 'testuser', + hash, + }), + ).toThrow('must be an argon2id hash'); + }); + }); + + describe('getLegacyHashFormat malformed argon2id prefix', () => { + test('should not treat malformed Drydock argon2id hash as plain fallback', () => { + // Starts with "argon2id$" so looksLikeArgon2Hash returns true, but parsing fails + const malformedDrydockHash = `argon2id$broken`; + basic.configuration = { + user: 'testuser', + hash: malformedDrydockHash, + }; + // getMetadata uses isLegacyHash -> getLegacyHashFormat which returns undefined for + // hashes that look like argon2 but fail parsing — so usesLegacyHash should be false + expect(basic.getMetadata()).toEqual({ usesLegacyHash: false }); + }); + + test('should not treat malformed PHC argon2id hash as plain fallback', () => { + // Starts with "$argon2id$" but has wrong structure + const malformedPhcHash = `$argon2id$garbage`; + basic.configuration = { + user: 'testuser', + hash: malformedPhcHash, + }; + expect(basic.getMetadata()).toEqual({ usesLegacyHash: false 
}); + }); + + test('should reject authentication against malformed Drydock argon2id hash', async () => { + basic.configuration = { + user: 'testuser', + hash: `argon2id$broken`, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + }); + + describe('verifyShaPassword and verifyMd5Password undefined parse results', () => { + test('should reject SHA authentication when parseShaHash returns undefined on second call', async () => { + // Line 362: verifyShaPassword is called but its internal parseShaHash returns undefined. + // verifyPassword calls normalizeHash -> trim() on the hash, then uses the result for + // all dispatch checks. If trim() returns `this` (the proxy), we can control substring() + // calls to make the first parseShaHash succeed and the second (inside verifyShaPassword) fail. + // + // Call trace through proxy: + // verifyPassword -> normalizeHash -> trim() [returns self] + // parseArgon2Hash -> normalizeHash -> trim() [returns self] + // parseDrydockArgon2Hash -> split('$') [returns non-argon2] + // parsePhcArgon2Hash -> split('$') [returns non-argon2] + // looksLikeArgon2Hash -> normalizeHash -> trim() [returns self] + // startsWith('argon2id$') -> false + // startsWith('$argon2id$') -> false + // parseShaHash (dispatch) -> normalizeHash -> trim() [returns self] + // substring(0,5) -> '{SHA}', substring(5) -> valid 20-byte base64 + // verifyShaPassword -> parseShaHash -> normalizeHash -> trim() [returns self] + // substring(0,5) -> '{SHA}', substring(5) -> '' (fails !encoded check) + const validSha20 = Buffer.alloc(20, 1).toString('base64'); + let substringFromFiveCount = 0; + const flakyHash = { + trim() { + return this; + }, + split() { + return ['not-argon2']; + }, + startsWith() { + return false; + }, + get length() { + return 100; + }, + substring(start: number, end?: number) { + if (start === 0 && end === 5) { + return '{SHA}'; + 
} + if (start === 5) { + substringFromFiveCount += 1; + // First call (dispatch check): return valid base64 of 20 bytes + if (substringFromFiveCount === 1) { + return validSha20; + } + // Second call (inside verifyShaPassword): return empty -> parseShaHash returns undefined + return ''; + } + return ''; + }, + toLowerCase() { + return '{sha}'; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyHash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); + }); + + test('should reject MD5 authentication when parseMd5Hash returns undefined on second call', async () => { + // Line 376: verifyMd5Password is called but its internal parseMd5Hash returns undefined. + // Same proxy strategy: trim() returns self so we control all method calls. + // + // parseMd5Hash checks: + // normalizeHash -> trim() [returns self] + // startsWith('$apr1$') or startsWith('$1$') -> needs true + // split('$') -> needs >= 4 parts with variant='1' and valid salt + // + // On second call inside verifyMd5Password, split('$') returns < 4 parts. + let splitDollarCount = 0; + const flakyHash = { + trim() { + return this; + }, + split(separator: string) { + if (separator === '$') { + splitDollarCount += 1; + // parseDrydockArgon2Hash & parsePhcArgon2Hash also call split('$') + // Calls 1-2: argon2 checks -> return non-argon2 + if (splitDollarCount <= 2) { + return ['not-argon2']; + } + // parseShaHash does NOT call split — it uses substring. 
+ // parseMd5Hash calls split('$'): + // Call 3 (dispatch check): return valid MD5 parts + if (splitDollarCount === 3) { + return LEGACY_MD5_HASH.split('$'); + } + // Call 4 (inside verifyMd5Password): return too few parts -> undefined + return ['', '1']; + } + return ['not-argon2']; + }, + startsWith(prefix: string) { + // For looksLikeArgon2Hash + if (prefix === 'argon2id$' || prefix === '$argon2id$') { + return false; + } + // For parseMd5Hash: $1$ or $apr1$ + return prefix === '$1$'; + }, + get length() { + return 4; + }, + substring(start: number, end?: number) { + // parseShaHash calls substring(0, 5) — needs to NOT match {sha} + if (start === 0 && end === 5) { + return '$1$sa'; + } + return ''; + }, + } as unknown as string; + + basic.configuration = { + user: 'testuser', + hash: flakyHash, + }; + + await new Promise((resolve) => { + basic.authenticate('testuser', 'password', (_err, result) => { + expect(result).toBe(false); + resolve(); + }); + }); }); }); }); diff --git a/app/authentications/providers/basic/Basic.ts b/app/authentications/providers/basic/Basic.ts index 9ac9e84d..22c69e1e 100644 --- a/app/authentications/providers/basic/Basic.ts +++ b/app/authentications/providers/basic/Basic.ts @@ -1,12 +1,19 @@ import { argon2, createHash, timingSafeEqual } from 'node:crypto'; +import { createRequire } from 'node:module'; import Authentication from '../Authentication.js'; import BasicStrategy from './BasicStrategy.js'; +const require = createRequire(import.meta.url); +const apacheMd5 = require('apache-md5') as (password: string, salt: string) => string; +const unixCrypt = require('unix-crypt-td-js') as (password: string, salt: string) => string; + function hashValue(value: string): Buffer { return createHash('sha256').update(value, 'utf8').digest(); } -const ARGON2_HASH_PARTS = 6; +const DRYDOCK_ARGON2_HASH_PARTS = 6; +const PHC_ARGON2_HASH_PARTS = 6; +const PHC_ARGON2_VERSION = 19; const MIN_SALT_SIZE = 16; const MIN_HASH_SIZE = 32; const 
MIN_ARGON2_MEMORY = 19456; @@ -24,6 +31,26 @@ interface ParsedArgon2Hash { hash: Buffer; } +interface ParsedMd5Hash { + variant: 'apr1' | '1'; + salt: string; + encodedHash: string; +} + +interface ParsedCryptHash { + salt: string; + encodedHash: string; +} + +type LegacyHashFormat = 'sha1' | 'apr1' | 'md5' | 'crypt' | 'plain'; +const UNSUPPORTED_PLAIN_FALLBACK_PATTERNS: RegExp[] = [ + /^\$2[abxy]\$/i, // bcrypt variants +]; + +function normalizeHash(rawHash: string): string { + return rawHash.trim(); +} + function parsePositiveInteger(raw: string): number | undefined { if (!/^\d+$/.test(raw)) { return undefined; @@ -39,62 +66,259 @@ function decodeBase64(raw: string): Buffer | undefined { if (raw.length === 0) { return undefined; } - if (!/^[A-Za-z0-9+/]+={0,2}$/.test(raw) || raw.length % 4 !== 0) { + if (!/^[A-Za-z0-9+/_-]+={0,2}$/.test(raw)) { return undefined; } - return Buffer.from(raw, 'base64'); -} + const normalized = raw.replaceAll('-', '+').replaceAll('_', '/'); + const firstPaddingIndex = normalized.indexOf('='); + if (firstPaddingIndex !== -1) { + if (!/^=+$/.test(normalized.substring(firstPaddingIndex)) || normalized.length % 4 !== 0) { + return undefined; + } + } else if (normalized.length % 4 === 1) { + return undefined; + } -function parseArgon2Hash(rawHash: string): ParsedArgon2Hash | undefined { - const parts = rawHash.split('$'); - if (parts.length !== ARGON2_HASH_PARTS || parts[0] !== 'argon2id') { + const padded = + firstPaddingIndex === -1 + ? 
normalized.padEnd(normalized.length + ((4 - (normalized.length % 4)) % 4), '=') + : normalized; + const decoded = Buffer.from(padded, 'base64'); + if (decoded.length === 0) { return undefined; } - const memory = parsePositiveInteger(parts[1]); - const passes = parsePositiveInteger(parts[2]); - const parallelism = parsePositiveInteger(parts[3]); - const salt = decodeBase64(parts[4]); - const hash = decodeBase64(parts[5]); + return decoded; +} + +function parseArgon2Parameters( + rawMemory: string, + rawPasses: string, + rawParallelism: string, +): { memory: number; passes: number; parallelism: number } | undefined { + const memory = parsePositiveInteger(rawMemory); + const passes = parsePositiveInteger(rawPasses); + const parallelism = parsePositiveInteger(rawParallelism); if ( !memory || !passes || !parallelism || - !salt || - !hash || memory < MIN_ARGON2_MEMORY || memory > MAX_ARGON2_MEMORY || passes < MIN_ARGON2_PASSES || passes > MAX_ARGON2_PASSES || parallelism < MIN_ARGON2_PARALLELISM || - parallelism > MAX_ARGON2_PARALLELISM || - salt.length < MIN_SALT_SIZE || - hash.length < MIN_HASH_SIZE + parallelism > MAX_ARGON2_PARALLELISM + ) { + return undefined; + } + + return { memory, passes, parallelism }; +} + +function parsePhcArgon2Parameters( + rawParameters: string, +): { memory: number; passes: number; parallelism: number } | undefined { + const entries = rawParameters.split(','); + if (entries.length !== 3) { + return undefined; + } + + let rawMemory: string | undefined; + let rawPasses: string | undefined; + let rawParallelism: string | undefined; + + for (const entry of entries) { + const [key, value, ...extra] = entry.split('='); + if (!key || value === undefined || extra.length > 0) { + return undefined; + } + + switch (key) { + case 'm': + if (rawMemory !== undefined) { + return undefined; + } + rawMemory = value; + break; + case 't': + if (rawPasses !== undefined) { + return undefined; + } + rawPasses = value; + break; + case 'p': + if (rawParallelism 
!== undefined) { + return undefined; + } + rawParallelism = value; + break; + default: + return undefined; + } + } + + if (!rawMemory || !rawPasses || !rawParallelism) { + return undefined; + } + + return parseArgon2Parameters(rawMemory, rawPasses, rawParallelism); +} + +function parseDrydockArgon2Hash(normalizedHash: string): ParsedArgon2Hash | undefined { + const parts = normalizedHash.split('$'); + if (parts.length !== DRYDOCK_ARGON2_HASH_PARTS || parts[0] !== 'argon2id') { + return undefined; + } + + const params = parseArgon2Parameters(parts[1], parts[2], parts[3]); + const salt = decodeBase64(parts[4]); + const hash = decodeBase64(parts[5]); + + if (!params || !salt || !hash || salt.length < MIN_SALT_SIZE || hash.length < MIN_HASH_SIZE) { + return undefined; + } + + return { ...params, salt, hash }; +} + +function parsePhcArgon2Hash(normalizedHash: string): ParsedArgon2Hash | undefined { + const parts = normalizedHash.split('$'); + if ( + parts.length !== PHC_ARGON2_HASH_PARTS || + parts[0] !== '' || + parts[1] !== 'argon2id' || + parts[2] !== `v=${PHC_ARGON2_VERSION}` ) { return undefined; } - return { memory, passes, parallelism, salt, hash }; + const params = parsePhcArgon2Parameters(parts[3]); + const salt = decodeBase64(parts[4]); + const hash = decodeBase64(parts[5]); + + if (!params || !salt || !hash || salt.length < MIN_SALT_SIZE || hash.length < MIN_HASH_SIZE) { + return undefined; + } + + return { ...params, salt, hash }; +} + +function looksLikeArgon2Hash(rawHash: string): boolean { + const normalizedHash = normalizeHash(rawHash); + return normalizedHash.startsWith('argon2id$') || normalizedHash.startsWith('$argon2id$'); } +function parseArgon2Hash(rawHash: string): ParsedArgon2Hash | undefined { + const normalizedHash = normalizeHash(rawHash); + return parseDrydockArgon2Hash(normalizedHash) ?? 
parsePhcArgon2Hash(normalizedHash); +} + +const SHA1_DIGEST_SIZE = 20; + function parseShaHash(rawHash: string): Buffer | undefined { - if (rawHash.length < 5) { + const normalizedHash = normalizeHash(rawHash); + if (normalizedHash.length < 5) { return undefined; } - const prefix = rawHash.substring(0, 5); + const prefix = normalizedHash.substring(0, 5); if (prefix.toLowerCase() !== '{sha}') { return undefined; } - const encoded = rawHash.substring(5); - const decoded = decodeBase64(encoded); - if (!decoded || decoded.length !== 20) { + const encoded = normalizedHash.substring(5); + if (!encoded) { + return undefined; + } + const decoded = Buffer.from(encoded, 'base64'); + if (decoded.length !== SHA1_DIGEST_SIZE) { return undefined; } return decoded; } +function parseMd5Hash(rawHash: string): ParsedMd5Hash | undefined { + const normalizedHash = normalizeHash(rawHash); + if (!normalizedHash.startsWith('$apr1$') && !normalizedHash.startsWith('$1$')) { + return undefined; + } + + const parts = normalizedHash.split('$'); + if (parts.length < 4) { + return undefined; + } + + const variant = parts[1]; + const salt = parts[2]; + if ((variant !== 'apr1' && variant !== '1') || !salt) { + return undefined; + } + + return { + variant, + salt, + encodedHash: normalizedHash, + }; +} + +function parseCryptHash(rawHash: string): ParsedCryptHash | undefined { + const normalizedHash = normalizeHash(rawHash); + if (normalizedHash.length !== 13) { + return undefined; + } + return { + salt: normalizedHash.substring(0, 2), + encodedHash: normalizedHash, + }; +} + +function timingSafeEqualString(left: string, right: string): boolean { + const leftBuffer = Buffer.from(left, 'utf8'); + const rightBuffer = Buffer.from(right, 'utf8'); + if (leftBuffer.length !== rightBuffer.length) { + return false; + } + + try { + return timingSafeEqual(leftBuffer, rightBuffer); + } catch { + return false; + } +} + +function isUnsupportedPlainFallbackHash(hash: string): boolean { + const normalizedHash = 
normalizeHash(hash); + return UNSUPPORTED_PLAIN_FALLBACK_PATTERNS.some((pattern) => pattern.test(normalizedHash)); +} + +function getLegacyHashFormat(hash: string): LegacyHashFormat | undefined { + if (parseArgon2Hash(hash)) { + return undefined; + } + if (looksLikeArgon2Hash(hash)) { + return undefined; + } + if (parseShaHash(hash) !== undefined) { + return 'sha1'; + } + + const md5Hash = parseMd5Hash(hash); + if (md5Hash) { + return md5Hash.variant === 'apr1' ? 'apr1' : 'md5'; + } + + if (parseCryptHash(hash)) { + return 'crypt'; + } + + if (isUnsupportedPlainFallbackHash(hash)) { + return undefined; + } + + return 'plain'; +} + function deriveArgon2Password(password: string, parsedHash: ParsedArgon2Hash): Promise { return new Promise((resolve, reject) => { argon2( @@ -132,37 +356,92 @@ async function verifyArgon2Password(password: string, encodedHash: string): Prom } } +// Legacy SHA-1 verification for v1.3.x upgrade compatibility only. +// SHA-1 is intentionally used here to match existing stored hashes — not as a +// new hashing strategy. Users are prompted to migrate to argon2id on login. +// Removal planned for v1.6.0. 
function verifyShaPassword(password: string, encodedHash: string): boolean { - const expectedHash = parseShaHash(encodedHash); - if (!expectedHash) { + const expectedDigest = parseShaHash(encodedHash); + if (!expectedDigest) { + return false; + } + + try { + // codeql[js/insufficient-password-hash] + const actualDigest = createHash('sha1').update(password).digest(); + return timingSafeEqual(actualDigest, expectedDigest); + } catch { + return false; + } +} + +function verifyMd5Password(password: string, encodedHash: string): boolean { + const parsedHash = parseMd5Hash(encodedHash); + if (!parsedHash) { + return false; + } + + try { + const salt = `$${parsedHash.variant}$${parsedHash.salt}$`; + const actualHash = apacheMd5(password, salt); + return timingSafeEqualString(actualHash, parsedHash.encodedHash); + } catch { + return false; + } +} + +function verifyCryptPassword(password: string, encodedHash: string): boolean { + const parsedHash = parseCryptHash(encodedHash); + if (!parsedHash) { return false; } try { - const actualHash = createHash('sha1').update(password).digest(); - return timingSafeEqual(actualHash, expectedHash); + const actualHash = unixCrypt(password, parsedHash.salt); + return timingSafeEqualString(actualHash, parsedHash.encodedHash); + } catch { + return false; + } +} + +function verifyPlainPassword(password: string, encodedHash: string): boolean { + try { + return timingSafeEqualString(password, normalizeHash(encodedHash)); } catch { return false; } } async function verifyPassword(password: string, encodedHash: string): Promise { - if (parseArgon2Hash(encodedHash)) { - return await verifyArgon2Password(password, encodedHash); + const normalizedHash = normalizeHash(encodedHash); + if (parseArgon2Hash(normalizedHash)) { + return await verifyArgon2Password(password, normalizedHash); + } + if (looksLikeArgon2Hash(normalizedHash)) { + return false; + } + if (parseShaHash(normalizedHash)) { + return verifyShaPassword(password, normalizedHash); } - if 
(parseShaHash(encodedHash)) { - return verifyShaPassword(password, encodedHash); + if (parseMd5Hash(normalizedHash)) { + return verifyMd5Password(password, normalizedHash); } - return false; + if (parseCryptHash(normalizedHash)) { + return verifyCryptPassword(password, normalizedHash); + } + if (isUnsupportedPlainFallbackHash(normalizedHash)) { + return false; + } + return verifyPlainPassword(password, normalizedHash); } -function isLegacyShaHash(hash: string): boolean { - return parseShaHash(hash) !== undefined; +function isLegacyHash(hash: string): boolean { + return getLegacyHashFormat(hash) !== undefined; } /** * Basic authentication backed by argon2id password hashes. - * Legacy SHA-1 {SHA} hashes are accepted with deprecation warnings. + * Legacy v1.3.9 hash formats are accepted with deprecation warnings. */ class Basic extends Authentication { /** @@ -174,27 +453,33 @@ class Basic extends Authentication { user: this.joi.string().required(), hash: this.joi .string() + .trim() .required() .custom((value: string, helpers: { error: (key: string) => unknown }) => { - if (parseArgon2Hash(value) || parseShaHash(value)) { - return value; + const normalizedHash = normalizeHash(value); + if (looksLikeArgon2Hash(normalizedHash) && !parseArgon2Hash(normalizedHash)) { + return helpers.error('any.invalid'); + } + if (isUnsupportedPlainFallbackHash(normalizedHash)) { + return helpers.error('any.invalid'); } - return helpers.error('any.invalid'); + return value; }, 'password hash validation') .messages({ 'any.invalid': - '"hash" must be an argon2id hash (argon2id$memory$passes$parallelism$salt$hash) or a legacy {SHA} hash', + '"hash" must be an argon2id hash ($argon2id$v=19$m=65536,t=3,p=4$salt$hash) or compatible Drydock format (argon2id$memory$passes$parallelism$salt$hash), or a supported legacy v1.3.9 hash', }), }); } /** - * Init authentication. Log deprecation warning if SHA hash detected. + * Init authentication. Log deprecation warning if legacy hash is detected. 
*/ initAuthentication(): void { - if (isLegacyShaHash(this.configuration.hash)) { + const format = getLegacyHashFormat(this.configuration.hash); + if (format) { this.log.warn( - 'SHA-1 password hash detected — SHA-1 is deprecated and will be removed in v1.6.0. Migrate to argon2id hashing.', + `Legacy password hash format detected (${format}) — v1.3.9 formats (SHA, APR1/MD5, crypt, plain) are deprecated and will be removed in v1.6.0. Migrate to argon2id hashing.`, ); } } @@ -226,7 +511,7 @@ class Basic extends Authentication { getMetadata(): Record { return { - usesLegacyHash: isLegacyShaHash(this.configuration.hash), + usesLegacyHash: isLegacyHash(this.configuration.hash), }; } diff --git a/app/event/audit-subscriptions.test.ts b/app/event/audit-subscriptions.test.ts index 612cf49a..6f7f443a 100644 --- a/app/event/audit-subscriptions.test.ts +++ b/app/event/audit-subscriptions.test.ts @@ -31,10 +31,12 @@ type OrderedEventHandlerFn = (payload: TPayload) => void | Promise; agentDisconnectedHandler: OrderedEventHandlerFn; + containerUpdatedHandler: (payload: ContainerLifecycleEventPayload) => void; } { const handlers: { securityAlert?: OrderedEventHandlerFn; agentDisconnected?: OrderedEventHandlerFn; + containerUpdated?: (payload: ContainerLifecycleEventPayload) => void; } = {}; const registerOrdered = @@ -61,18 +63,22 @@ function setupAuditSubscriptions(): { handlers.agentDisconnected = handler; }), registerContainerAdded: registerEvent(() => {}), + registerContainerUpdated: registerEvent((handler) => { + handlers.containerUpdated = handler; + }), registerContainerRemoved: registerEvent(() => {}), }; registerAuditLogSubscriptions(registrars); - if (!handlers.securityAlert || !handlers.agentDisconnected) { + if (!handlers.securityAlert || !handlers.agentDisconnected || !handlers.containerUpdated) { throw new Error('Expected audit handlers to be registered'); } return { securityAlertHandler: handlers.securityAlert, agentDisconnectedHandler: handlers.agentDisconnected, 
+ containerUpdatedHandler: handlers.containerUpdated, }; } @@ -153,4 +159,22 @@ describe('audit-subscriptions dedupe windows', () => { expect(mockInsertAudit).toHaveBeenCalledTimes(2); expect(mockInc).toHaveBeenCalledTimes(2); }); + + test('records container update audit with empty containerName fallback when name and id are missing', () => { + const { containerUpdatedHandler } = setupAuditSubscriptions(); + + containerUpdatedHandler({ + image: { name: 'nginx' }, + status: 'running', + } as unknown as ContainerLifecycleEventPayload); + + expect(mockInsertAudit).toHaveBeenCalledWith( + expect.objectContaining({ + action: 'container-update', + containerName: '', + details: 'status: running', + }), + ); + expect(mockInc).toHaveBeenCalledWith({ action: 'container-update' }); + }); }); diff --git a/app/event/audit-subscriptions.ts b/app/event/audit-subscriptions.ts index 89e8f880..a8e684c7 100644 --- a/app/event/audit-subscriptions.ts +++ b/app/event/audit-subscriptions.ts @@ -31,6 +31,7 @@ export interface AuditSubscriptionRegistrars { registerSecurityAlert: OrderedEventRegistrarFn; registerAgentDisconnected: OrderedEventRegistrarFn; registerContainerAdded: EventRegistrarFn; + registerContainerUpdated: EventRegistrarFn; registerContainerRemoved: EventRegistrarFn; } @@ -165,6 +166,19 @@ export function registerAuditLogSubscriptions(registrars: AuditSubscriptionRegis getAuditCounter()?.inc({ action: 'container-added' }); }); + registrars.registerContainerUpdated((containerUpdated) => { + auditStore.insertAudit({ + id: '', + timestamp: new Date().toISOString(), + action: 'container-update', + containerName: containerUpdated.name || containerUpdated.id || '', + containerImage: containerUpdated.image?.name, + status: 'info', + details: containerUpdated.status ? 
`status: ${containerUpdated.status}` : undefined, + }); + getAuditCounter()?.inc({ action: 'container-update' }); + }); + registrars.registerContainerRemoved((containerRemoved) => { auditStore.insertAudit({ id: '', diff --git a/app/event/index.audit.test.ts b/app/event/index.audit.test.ts index ca113b18..329fc6f6 100644 --- a/app/event/index.audit.test.ts +++ b/app/event/index.audit.test.ts @@ -156,6 +156,41 @@ describe('event default audit listeners', () => { expect(mockInc).toHaveBeenCalledWith({ action: 'container-removed' }); }); + test('should record container-update audit with status details', async () => { + const event = await loadEventModule(); + + event.emitContainerUpdated({ + name: 'nginx', + status: 'running', + image: { name: 'library/nginx' }, + }); + + expect(mockInsertAudit).toHaveBeenCalledWith( + expect.objectContaining({ + action: 'container-update', + containerName: 'nginx', + containerImage: 'library/nginx', + status: 'info', + details: 'status: running', + }), + ); + expect(mockInc).toHaveBeenCalledWith({ action: 'container-update' }); + }); + + test('should record container-update audit with id fallback and no status', async () => { + const event = await loadEventModule(); + + event.emitContainerUpdated({ id: 'abc123' }); + + expect(mockInsertAudit).toHaveBeenCalledWith( + expect.objectContaining({ + action: 'container-update', + containerName: 'abc123', + details: undefined, + }), + ); + }); + test('should record security-alert audits', async () => { const event = await loadEventModule(); diff --git a/app/event/index.ts b/app/event/index.ts index 6c3e614a..9599b7ed 100644 --- a/app/event/index.ts +++ b/app/event/index.ts @@ -407,6 +407,7 @@ registerAuditLogSubscriptions({ registerSecurityAlert, registerAgentDisconnected, registerContainerAdded, + registerContainerUpdated, registerContainerRemoved, }); diff --git a/app/log/index.debug-level.test.ts b/app/log/index.debug-level.test.ts new file mode 100644 index 00000000..3ef174fd --- 
/dev/null +++ b/app/log/index.debug-level.test.ts @@ -0,0 +1,51 @@ +const { mockAddEntry } = vi.hoisted(() => ({ + mockAddEntry: vi.fn(), +})); + +vi.mock('../configuration', () => ({ + getLogLevel: vi.fn(() => 'debug'), + getLogFormat: vi.fn(() => 'json'), + getLogBufferEnabled: vi.fn(() => true), +})); + +vi.mock('./buffer.js', () => ({ + addEntry: mockAddEntry, +})); + +vi.mock('./warn.js', () => ({ + setWarnLogger: vi.fn(), +})); + +describe('Logger with debug level', () => { + test('should propagate debug level to multistream destinations', async () => { + const log = (await import('./index.js')).default; + + expect(log.level).toBe('debug'); + + log.debug({ component: 'test' }, 'debug-level-message'); + + await vi.waitFor(() => { + expect(mockAddEntry).toHaveBeenCalledWith( + expect.objectContaining({ + level: 'debug', + msg: 'debug-level-message', + }), + ); + }); + }); + + test('should deliver info messages when level is debug', async () => { + const log = (await import('./index.js')).default; + + log.info({ component: 'test' }, 'info-level-message'); + + await vi.waitFor(() => { + expect(mockAddEntry).toHaveBeenCalledWith( + expect.objectContaining({ + level: 'info', + msg: 'info-level-message', + }), + ); + }); + }); +}); diff --git a/app/log/index.ts b/app/log/index.ts index 0b9686f6..3d623e41 100644 --- a/app/log/index.ts +++ b/app/log/index.ts @@ -37,9 +37,10 @@ function createMainLogStream() { } function createLogStreams() { - const streams: { stream: Writable }[] = [{ stream: createMainLogStream() }]; + const level = getLogLevel(); + const streams: { stream: Writable; level: string }[] = [{ stream: createMainLogStream(), level }]; if (getLogBufferEnabled()) { - streams.push({ stream: bufferStream }); + streams.push({ stream: bufferStream, level }); } return streams; } diff --git a/app/package-lock.json b/app/package-lock.json index f54f4485..e373c2af 100644 --- a/app/package-lock.json +++ b/app/package-lock.json @@ -13,6 +13,7 @@ "@slack/web-api": 
"^7.14.1", "ajv": "^8.18.0", "ajv-formats": "^3.0.1", + "apache-md5": "1.1.8", "axios": "^1.13.6", "capitalize": "2.0.4", "change-case": "^5.4.4", @@ -52,6 +53,7 @@ "set-value": "4.1.0", "sort-es": "1.7.18", "undici": "^7.22.0", + "unix-crypt-td-js": "1.1.4", "uuid": "^13.0.0", "yaml": "2.8.2" }, @@ -3416,6 +3418,15 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/apache-md5": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/apache-md5/-/apache-md5-1.1.8.tgz", + "integrity": "sha512-FCAJojipPn0bXjuEpjOOOMN8FZDkxfWWp4JGN9mifU2IhxvKyXZYqpzPHdnTSUpmPDy+tsslB6Z1g+Vg6nVbYA==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/arg": { "version": "4.1.3", "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", @@ -7453,6 +7464,12 @@ "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==", "license": "MIT" }, + "node_modules/unix-crypt-td-js": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/unix-crypt-td-js/-/unix-crypt-td-js-1.1.4.tgz", + "integrity": "sha512-8rMeVYWSIyccIJscb9NdCfZKSRBKYTeVnwmiRYT2ulE3qd1RaDQ0xQDP+rI3ccIWbhu/zuo5cgN8z73belNZgw==", + "license": "BSD-3-Clause" + }, "node_modules/unpipe": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", diff --git a/app/package.json b/app/package.json index c54c5d8f..99a3f2c7 100644 --- a/app/package.json +++ b/app/package.json @@ -24,6 +24,7 @@ "@slack/web-api": "^7.14.1", "ajv": "^8.18.0", "ajv-formats": "^3.0.1", + "apache-md5": "1.1.8", "axios": "^1.13.6", "capitalize": "2.0.4", "change-case": "^5.4.4", @@ -63,6 +64,7 @@ "set-value": "4.1.0", "sort-es": "1.7.18", "undici": "^7.22.0", + "unix-crypt-td-js": "1.1.4", "uuid": "^13.0.0", "yaml": "2.8.2" }, diff --git a/app/registries/BaseRegistry.ts b/app/registries/BaseRegistry.ts index fd00601b..7d2abf77 100644 --- a/app/registries/BaseRegistry.ts +++ b/app/registries/BaseRegistry.ts @@ 
-76,7 +76,8 @@ class BaseRegistry extends Registry { private getHttpsAgent() { const shouldDisableTlsVerification = this.configuration?.insecure === true; const hasCaFile = Boolean(this.configuration?.cafile); - if (!shouldDisableTlsVerification && !hasCaFile) { + const hasMutualTls = Boolean(this.configuration?.clientcert); + if (!shouldDisableTlsVerification && !hasCaFile && !hasMutualTls) { return undefined; } @@ -92,10 +93,25 @@ class BaseRegistry extends Registry { ca = fs.readFileSync(caPath); } + let cert; + let key; + if (hasMutualTls) { + const certPath = resolveConfiguredPath(this.configuration.clientcert, { + label: `registry ${this.getId()} client certificate file path`, + }); + cert = fs.readFileSync(certPath); + const keyPath = resolveConfiguredPath(this.configuration.clientkey, { + label: `registry ${this.getId()} client key file path`, + }); + key = fs.readFileSync(keyPath); + } + // Intentional opt-in for self-hosted registries with private/self-signed cert chains. // lgtm[js/disabling-certificate-validation] this.httpsAgent = new https.Agent({ ca, + cert, + key, rejectUnauthorized: !shouldDisableTlsVerification, }); return this.httpsAgent; diff --git a/app/registries/providers/custom/Custom.test.ts b/app/registries/providers/custom/Custom.test.ts index a31deedc..ab84d041 100644 --- a/app/registries/providers/custom/Custom.test.ts +++ b/app/registries/providers/custom/Custom.test.ts @@ -43,6 +43,38 @@ test('validatedConfiguration should accept cafile and insecure tls options', asy }); }); +test('validatedConfiguration should accept mTLS client certificate options', async () => { + expect( + custom.validateConfiguration({ + url: 'http://localhost:5000', + clientcert: '/certs/client.pem', + clientkey: '/certs/client-key.pem', + }), + ).toStrictEqual({ + url: 'http://localhost:5000', + clientcert: '/certs/client.pem', + clientkey: '/certs/client-key.pem', + }); +}); + +test('validatedConfiguration should reject clientcert without clientkey', async () 
=> { + expect(() => + custom.validateConfiguration({ + url: 'http://localhost:5000', + clientcert: '/certs/client.pem', + }), + ).toThrow(); +}); + +test('validatedConfiguration should reject clientkey without clientcert', async () => { + expect(() => + custom.validateConfiguration({ + url: 'http://localhost:5000', + clientkey: '/certs/client-key.pem', + }), + ).toThrow(); +}); + test('validatedConfiguration should throw error when auth is not base64', async () => { expect(() => { custom.validateConfiguration({ diff --git a/app/registries/providers/custom/Custom.ts b/app/registries/providers/custom/Custom.ts index 68f1cdda..a709b3e7 100644 --- a/app/registries/providers/custom/Custom.ts +++ b/app/registries/providers/custom/Custom.ts @@ -18,10 +18,13 @@ class Custom extends BaseRegistry { auth: authSchema, cafile: this.joi.string(), insecure: this.joi.boolean(), + clientcert: this.joi.string(), + clientkey: this.joi.string(), }) .and('login', 'password') .without('login', 'auth') - .without('password', 'auth'); + .without('password', 'auth') + .and('clientcert', 'clientkey'); return this.joi.alternatives([this.joi.string().allow(''), customConfigSchema]); } diff --git a/app/registries/providers/shared/SelfHostedBasic.test.ts b/app/registries/providers/shared/SelfHostedBasic.test.ts index 7fb28758..64b390d2 100644 --- a/app/registries/providers/shared/SelfHostedBasic.test.ts +++ b/app/registries/providers/shared/SelfHostedBasic.test.ts @@ -185,6 +185,41 @@ test('validateConfiguration should allow cafile and insecure options', async () }); }); +test('validateConfiguration should allow mTLS client certificate options', async () => { + const registry = new SelfHostedBasic(); + expect( + registry.validateConfiguration({ + url: 'https://registry.acme.com', + clientcert: '/certs/client.pem', + clientkey: '/certs/client-key.pem', + }), + ).toStrictEqual({ + url: 'https://registry.acme.com', + clientcert: '/certs/client.pem', + clientkey: '/certs/client-key.pem', + }); +}); 
+ +test('validateConfiguration should reject clientcert without clientkey', async () => { + const registry = new SelfHostedBasic(); + expect(() => + registry.validateConfiguration({ + url: 'https://registry.acme.com', + clientcert: '/certs/client.pem', + }), + ).toThrow(); +}); + +test('validateConfiguration should reject clientkey without clientcert', async () => { + const registry = new SelfHostedBasic(); + expect(() => + registry.validateConfiguration({ + url: 'https://registry.acme.com', + clientkey: '/certs/client-key.pem', + }), + ).toThrow(); +}); + test('authenticate should set httpsAgent with rejectUnauthorized=false when insecure=true', async () => { const registry = new SelfHostedBasic(); registry.configuration = { @@ -231,3 +266,70 @@ test('authenticate should load CA file into httpsAgent when cafile is configured fs.rmSync(tempDir, { recursive: true, force: true }); } }); + +test('authenticate should load client cert and key into httpsAgent for mTLS', async () => { + const registry = new SelfHostedBasic(); + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'drydock-selfhosted-')); + const certPath = path.join(tempDir, 'client.pem'); + const keyPath = path.join(tempDir, 'client-key.pem'); + + try { + fs.writeFileSync(certPath, 'test-client-cert'); + fs.writeFileSync(keyPath, 'test-client-key'); + registry.configuration = { + url: 'https://registry.acme.com', + clientcert: certPath, + clientkey: keyPath, + }; + + const result = await registry.authenticate( + { + name: 'library/nginx', + registry: { url: 'registry.acme.com' }, + }, + { headers: {} }, + ); + + expect(result.httpsAgent).toBeDefined(); + expect(result.httpsAgent.options.rejectUnauthorized).toBe(true); + expect(result.httpsAgent.options.cert.toString('utf-8')).toBe('test-client-cert'); + expect(result.httpsAgent.options.key.toString('utf-8')).toBe('test-client-key'); + } finally { + fs.rmSync(tempDir, { recursive: true, force: true }); + } +}); + +test('authenticate should combine CA 
file and mTLS client cert in httpsAgent', async () => { + const registry = new SelfHostedBasic(); + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'drydock-selfhosted-')); + const caPath = path.join(tempDir, 'ca.pem'); + const certPath = path.join(tempDir, 'client.pem'); + const keyPath = path.join(tempDir, 'client-key.pem'); + + try { + fs.writeFileSync(caPath, 'test-ca-content'); + fs.writeFileSync(certPath, 'test-client-cert'); + fs.writeFileSync(keyPath, 'test-client-key'); + registry.configuration = { + url: 'https://registry.acme.com', + cafile: caPath, + clientcert: certPath, + clientkey: keyPath, + }; + + const result = await registry.authenticate( + { + name: 'library/nginx', + registry: { url: 'registry.acme.com' }, + }, + { headers: {} }, + ); + + expect(result.httpsAgent).toBeDefined(); + expect(result.httpsAgent.options.ca.toString('utf-8')).toBe('test-ca-content'); + expect(result.httpsAgent.options.cert.toString('utf-8')).toBe('test-client-cert'); + expect(result.httpsAgent.options.key.toString('utf-8')).toBe('test-client-key'); + } finally { + fs.rmSync(tempDir, { recursive: true, force: true }); + } +}); diff --git a/app/registries/providers/shared/SelfHostedBasic.ts b/app/registries/providers/shared/SelfHostedBasic.ts index 71e4e03b..e110794c 100644 --- a/app/registries/providers/shared/SelfHostedBasic.ts +++ b/app/registries/providers/shared/SelfHostedBasic.ts @@ -18,10 +18,13 @@ class SelfHostedBasic extends BaseRegistry { auth: authSchema, cafile: this.joi.string(), insecure: this.joi.boolean(), + clientcert: this.joi.string(), + clientkey: this.joi.string(), }) .and('login', 'password') .without('login', 'auth') - .without('password', 'auth'); + .without('password', 'auth') + .and('clientcert', 'clientkey'); } maskConfiguration() { diff --git a/app/triggers/providers/docker/Docker.test.ts b/app/triggers/providers/docker/Docker.test.ts index 38a56468..b531faf5 100644 --- a/app/triggers/providers/docker/Docker.test.ts +++ 
b/app/triggers/providers/docker/Docker.test.ts @@ -2237,6 +2237,36 @@ describe('additional docker trigger coverage', () => { ); }); + test('cleanupOldImages should warn when digest image removal fails', async () => { + docker.configuration.prune = true; + vi.spyOn(docker, 'removeImage').mockRejectedValue(new Error('remove failed')); + const registryProvider = { + getImageFullName: vi.fn(() => 'my-registry/test/test:sha256:old'), + }; + const logContainer = createMockLog('warn'); + + await docker.cleanupOldImages( + {}, + registryProvider, + { + image: { + registry: { name: 'hub', url: 'my-registry' }, + name: 'test/test', + tag: { value: '1.0.0' }, + digest: { repo: 'sha256:old' }, + }, + updateKind: { + kind: 'digest', + }, + }, + logContainer, + ); + + expect(logContainer.warn).toHaveBeenCalledWith( + expect.stringContaining('Unable to remove previous digest image'), + ); + }); + test('cleanupOldImages should skip digest pruning when digest repo is missing', async () => { docker.configuration.prune = true; const removeImageSpy = vi.spyOn(docker, 'removeImage').mockResolvedValue(undefined); diff --git a/app/triggers/providers/docker/Docker.ts b/app/triggers/providers/docker/Docker.ts index 6ab46d55..ff9eda05 100644 --- a/app/triggers/providers/docker/Docker.ts +++ b/app/triggers/providers/docker/Docker.ts @@ -807,7 +807,7 @@ class Docker extends Trigger { const oldImage = registry.getImageFullName(container.image, container.image.digest.repo); await this.removeImage(dockerApi, oldImage, logContainer); } catch (e) { - logContainer.debug(`Unable to remove previous digest image (${e.message})`); + logContainer.warn(`Unable to remove previous digest image (${e.message})`); } } } diff --git a/app/triggers/providers/dockercompose/Dockercompose.test.ts b/app/triggers/providers/dockercompose/Dockercompose.test.ts index 4ccf34eb..e390be81 100644 --- a/app/triggers/providers/dockercompose/Dockercompose.test.ts +++ b/app/triggers/providers/dockercompose/Dockercompose.test.ts 
@@ -8,6 +8,7 @@ import { getState } from '../../../registry/index.js'; import * as backupStore from '../../../store/backup.js'; import { sleep } from '../../../util/sleep.js'; import Dockercompose, { + testable_hasExplicitRegistryHost, testable_normalizeImplicitLatest, testable_normalizePostStartEnvironmentValue, testable_normalizePostStartHooks, @@ -695,6 +696,28 @@ describe('Dockercompose Trigger', () => { ); }); + test('processComposeFile should report when all mapped containers are already up to date', async () => { + trigger.configuration.dryrun = false; + const container = makeContainer({ + tagValue: '1.0.0', + remoteValue: '1.0.0', + updateAvailable: false, + }); + + vi.spyOn(trigger, 'getComposeFileAsObject').mockResolvedValue( + makeCompose({ nginx: { image: 'nginx:1.0.0' } }), + ); + + const { writeComposeFileSpy, composeUpdateSpy } = spyOnProcessComposeHelpers(trigger); + + const updated = await trigger.processComposeFile('/opt/drydock/test/stack.yml', [container]); + + expect(updated).toBe(false); + expect(mockLog.info).toHaveBeenCalledWith(expect.stringContaining('already up to date')); + expect(writeComposeFileSpy).not.toHaveBeenCalled(); + expect(composeUpdateSpy).not.toHaveBeenCalled(); + }); + test('processComposeFile should warn when no containers belong to compose', async () => { const container = makeContainer({ name: 'unknown', @@ -708,6 +731,7 @@ describe('Dockercompose Trigger', () => { await trigger.processComposeFile('/opt/drydock/test/stack.yml', [container]); expect(mockLog.warn).toHaveBeenCalledWith(expect.stringContaining('No containers found')); + expect(mockLog.warn).toHaveBeenCalledWith(expect.stringContaining('not found in compose file')); }); test('processComposeFile should warn and continue on compose/runtime reconciliation mismatch by default', async () => { @@ -1911,6 +1935,39 @@ describe('Dockercompose Trigger', () => { ).not.toThrow(); }); + test('resolveComposeFilePathFromDirectory should return original path when target is a 
file', async () => { + fs.stat.mockResolvedValueOnce({ + isDirectory: () => false, + mtimeMs: 1_700_000_000_000, + } as any); + + const resolved = await trigger.resolveComposeFilePathFromDirectory( + '/opt/drydock/test/stack.yml', + ); + + expect(resolved).toBe('/opt/drydock/test/stack.yml'); + }); + + test('resolveComposeFilePathFromDirectory should warn and return null when directory has no compose candidates', async () => { + fs.stat.mockResolvedValueOnce({ + isDirectory: () => true, + mtimeMs: 1_700_000_000_000, + } as any); + const missingComposeFileError = Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); + fs.access + .mockRejectedValueOnce(missingComposeFileError) + .mockRejectedValueOnce(missingComposeFileError) + .mockRejectedValueOnce(missingComposeFileError) + .mockRejectedValueOnce(missingComposeFileError); + + const resolved = await trigger.resolveComposeFilePathFromDirectory('/opt/drydock/test/stack'); + + expect(resolved).toBeNull(); + expect(mockLog.warn).toHaveBeenCalledWith( + expect.stringContaining('does not contain a compose file candidate'), + ); + }); + test('resolveComposeServiceContext should throw when no compose file is configured', async () => { trigger.configuration.file = undefined; @@ -2675,6 +2732,34 @@ describe('Dockercompose Trigger', () => { expect(writeSpy).not.toHaveBeenCalled(); }); + test('mutateComposeFile should forward a pre-parsed compose object to validation', async () => { + vi.spyOn(trigger, 'getComposeFile').mockResolvedValue( + Buffer.from('services:\n nginx:\n image: nginx:1.0.0\n'), + ); + const validateSpy = vi + .spyOn(trigger, 'validateComposeConfiguration') + .mockResolvedValue(undefined); + vi.spyOn(trigger, 'writeComposeFile').mockResolvedValue(); + const parsedComposeFileObject = makeCompose({ nginx: { image: 'nginx:1.1.0' } }); + + const changed = await trigger.mutateComposeFile( + '/opt/drydock/test/compose.yml', + (text) => text.replace('nginx:1.0.0', 'nginx:1.1.0'), + { + parsedComposeFileObject, + 
}, + ); + + expect(changed).toBe(true); + expect(validateSpy).toHaveBeenCalledWith( + '/opt/drydock/test/compose.yml', + expect.stringContaining('nginx:1.1.0'), + { + parsedComposeFileObject, + }, + ); + }); + test('validateComposeConfiguration should validate compose text in-process without shell commands', async () => { await trigger.validateComposeConfiguration( '/opt/drydock/test/compose.yml', @@ -2698,6 +2783,25 @@ describe('Dockercompose Trigger', () => { expect(getComposeFileAsObjectSpy).toHaveBeenCalledWith('/opt/drydock/test/stack.yml'); }); + test('validateComposeConfiguration should reuse a pre-parsed compose object when provided', async () => { + const parseSpy = vi.spyOn(yaml, 'parse'); + const getComposeFileAsObjectSpy = vi + .spyOn(trigger, 'getComposeFileAsObject') + .mockResolvedValue(makeCompose({ base: { image: 'busybox:1.0.0' } })); + + await trigger.validateComposeConfiguration( + '/opt/drydock/test/stack.override.yml', + 'services:\n nginx:\n image: nginx:1.1.0\n', + { + composeFiles: ['/opt/drydock/test/stack.yml', '/opt/drydock/test/stack.override.yml'], + parsedComposeFileObject: makeCompose({ nginx: { image: 'nginx:1.1.0' } }), + }, + ); + + expect(parseSpy).not.toHaveBeenCalled(); + expect(getComposeFileAsObjectSpy).toHaveBeenCalledWith('/opt/drydock/test/stack.yml'); + }); + test('updateComposeServiceImageInText should throw when compose document has parse errors', () => { expect(() => testable_updateComposeServiceImageInText('services:\n nginx: [\n', 'nginx', 'nginx:2.0.0'), @@ -2997,6 +3101,35 @@ describe('Dockercompose Trigger', () => { expect(mockLog.warn).toHaveBeenCalledWith(expect.stringContaining('permission denied')); }); + test('triggerBatch should warn when container compose file does not match configured file', async () => { + trigger.configuration.file = '/opt/drydock/configured.yml'; + fs.access.mockResolvedValue(undefined); + + const container = { + name: 'mismatched', + watcher: 'local', + labels: { 'dd.compose.file': 
'/opt/drydock/other.yml' }, + }; + + await trigger.triggerBatch([container]); + + expect(mockLog.warn).toHaveBeenCalledWith( + expect.stringContaining('do not match configured file'), + ); + }); + + test('triggerBatch should warn when no containers matched any compose file', async () => { + trigger.configuration.file = undefined; + + const container = { name: 'orphan', watcher: 'local' }; + + await trigger.triggerBatch([container]); + + expect(mockLog.warn).toHaveBeenCalledWith( + 'No containers matched any compose file for this trigger', + ); + }); + test('triggerBatch should group containers by compose file and process each', async () => { trigger.configuration.file = undefined; fs.access.mockResolvedValue(undefined); @@ -3047,6 +3180,35 @@ describe('Dockercompose Trigger', () => { ]); }); + test('triggerBatch should only access each compose file once across containers sharing the same compose chain', async () => { + trigger.configuration.file = undefined; + fs.access.mockResolvedValue(undefined); + + const sharedComposeLabels = { + 'com.docker.compose.project.config_files': + '/opt/drydock/test/stack.yml,/opt/drydock/test/stack.override.yml', + }; + const container1 = { + name: 'app1', + watcher: 'local', + labels: sharedComposeLabels, + }; + const container2 = { + name: 'app2', + watcher: 'local', + labels: sharedComposeLabels, + }; + + const processComposeFileSpy = vi.spyOn(trigger, 'processComposeFile').mockResolvedValue(); + + await trigger.triggerBatch([container1, container2]); + + expect(processComposeFileSpy).toHaveBeenCalledTimes(1); + expect(fs.access).toHaveBeenCalledTimes(2); + expect(fs.access).toHaveBeenCalledWith('/opt/drydock/test/stack.yml'); + expect(fs.access).toHaveBeenCalledWith('/opt/drydock/test/stack.override.yml'); + }); + test('triggerBatch should only process containers matching configured compose file affinity', async () => { trigger.configuration.file = '/opt/drydock/test/monitoring.yml'; fs.access.mockImplementation(async 
(composeFilePath) => { @@ -3077,8 +3239,43 @@ describe('Dockercompose Trigger', () => { expect(processComposeFileSpy).toHaveBeenCalledWith('/opt/drydock/test/monitoring.yml', [ monitoringContainer, ]); + expect(mockLog.warn).toHaveBeenCalledWith( + expect.stringContaining('do not match configured file'), + ); + }); + + test('triggerBatch should resolve a configured compose directory to compose.yaml for affinity matching', async () => { + trigger.configuration.file = '/opt/drydock/stacks/filebrowser'; + fs.stat.mockImplementation(async (candidatePath: string) => { + if (candidatePath === '/opt/drydock/stacks/filebrowser') { + return { + isDirectory: () => true, + mtimeMs: 1_700_000_000_000, + } as any; + } + return { + isDirectory: () => false, + mtimeMs: 1_700_000_000_000, + } as any; + }); + fs.access.mockResolvedValue(undefined); + + const container = { + name: 'filebrowser', + watcher: 'local', + labels: { 'dd.compose.file': '/opt/drydock/stacks/filebrowser/compose.yaml' }, + }; + const processComposeFileSpy = vi.spyOn(trigger, 'processComposeFile').mockResolvedValue(true); + + await trigger.triggerBatch([container]); + + expect(processComposeFileSpy).toHaveBeenCalledTimes(1); + expect(processComposeFileSpy).toHaveBeenCalledWith( + '/opt/drydock/stacks/filebrowser/compose.yaml', + [container], + ); expect(mockLog.warn).not.toHaveBeenCalledWith( - expect.stringContaining('/opt/drydock/test/mysql.yml'), + expect.stringContaining('do not match configured file'), ); }); @@ -3255,13 +3452,33 @@ describe('Dockercompose Trigger', () => { test('trigger should delegate to triggerBatch with single container', async () => { const container = { name: 'test' }; - const spy = vi.spyOn(trigger, 'triggerBatch').mockResolvedValue(); + const spy = vi.spyOn(trigger, 'triggerBatch').mockResolvedValue([true]); await trigger.trigger(container); expect(spy).toHaveBeenCalledWith([container]); }); + test('trigger should throw when update is still available but compose trigger applies no 
runtime updates', async () => { + trigger.configuration.dryrun = false; + const container = { name: 'test', updateAvailable: true }; + vi.spyOn(trigger, 'triggerBatch').mockResolvedValue([false]); + + await expect(trigger.trigger(container)).rejects.toThrow( + 'No compose updates were applied for container test', + ); + }); + + test('trigger should use unknown fallback when throwing without a container name', async () => { + trigger.configuration.dryrun = false; + const container = { updateAvailable: true }; + vi.spyOn(trigger, 'triggerBatch').mockResolvedValue([false]); + + await expect(trigger.trigger(container as any)).rejects.toThrow( + 'No compose updates were applied for container unknown', + ); + }); + test('getConfigurationSchema should extend Docker schema with compose hardening options', () => { const schema = trigger.getConfigurationSchema(); expect(schema).toBeDefined(); @@ -3279,6 +3496,26 @@ describe('Dockercompose Trigger', () => { expect(error).toBeUndefined(); }); + test('getConfigurationSchema should accept env-normalized compose hardening keys', () => { + const schema = trigger.getConfigurationSchema(); + const { error, value } = schema.validate({ + prune: false, + dryrun: false, + autoremovetimeout: 10000, + file: '/opt/drydock/test/compose.yml', + backup: true, + composefilelabel: 'com.example.compose.file', + reconciliationmode: 'block', + digestpinning: true, + composefileonce: true, + }); + expect(error).toBeUndefined(); + expect(value.composeFileLabel).toBe('com.example.compose.file'); + expect(value.reconciliationMode).toBe('block'); + expect(value.digestPinning).toBe(true); + expect(value.composeFileOnce).toBe(true); + }); + test('normalizeImplicitLatest should return input when image is empty or already digest/tag qualified', () => { expect(testable_normalizeImplicitLatest('')).toBe(''); expect(testable_normalizeImplicitLatest('alpine@sha256:abc')).toBe('alpine@sha256:abc'); @@ -3289,6 +3526,12 @@ describe('Dockercompose Trigger', () => 
{ expect(testable_normalizeImplicitLatest('repo/')).toBe('repo/:latest'); }); + test('hasExplicitRegistryHost should detect empty, host:port, and localhost prefixes', () => { + expect(testable_hasExplicitRegistryHost('')).toBe(false); + expect(testable_hasExplicitRegistryHost('registry.example.com:5000/nginx:1.1.0')).toBe(true); + expect(testable_hasExplicitRegistryHost('localhost/nginx:1.1.0')).toBe(true); + }); + test('normalizePostStartHooks should return empty array when post_start is missing', () => { expect(testable_normalizePostStartHooks(undefined)).toEqual([]); }); @@ -3977,7 +4220,7 @@ describe('Dockercompose Trigger', () => { name: 'nginx', } as any), ).resolves.toEqual([]); - expect(mockLog.debug).toHaveBeenCalledWith( + expect(mockLog.warn).toHaveBeenCalledWith( expect.stringContaining('Unable to inspect compose labels'), ); }); @@ -4463,6 +4706,40 @@ describe('Dockercompose Trigger', () => { ).toBe('nginx:1.1.0'); }); + test('getComposeMutationImageReference should preserve explicit docker.io prefix from compose image', () => { + const container = makeContainer({ + updateKind: 'digest', + remoteValue: 'abc123', + result: {}, + }); + + trigger.configuration.digestPinning = false; + expect( + trigger.getComposeMutationImageReference( + container as any, + 'nginx:1.1.0', + 'docker.io/nginx:1.0.0', + ), + ).toBe('docker.io/nginx:1.1.0'); + + trigger.configuration.digestPinning = true; + expect( + trigger.getComposeMutationImageReference( + container as any, + 'nginx:1.1.0', + 'docker.io/nginx:1.0.0', + ), + ).toBe('docker.io/nginx@sha256:abc123'); + + expect( + trigger.getComposeMutationImageReference( + container as any, + 'ghcr.io/acme/nginx:1.1.0', + 'docker.io/nginx:1.0.0', + ), + ).toBe('ghcr.io/acme/nginx@sha256:abc123'); + }); + test('buildComposeServiceImageUpdates should use runtime update image when compose update override is missing', () => { const serviceUpdates = trigger.buildComposeServiceImageUpdates([ { @@ -4474,6 +4751,30 @@ 
describe('Dockercompose Trigger', () => { expect(serviceUpdates.get('nginx')).toBe('nginx:1.1.0'); }); + test('buildUpdatedComposeFileObjectForValidation should return undefined for non-object input', () => { + const updated = trigger.buildUpdatedComposeFileObjectForValidation(null, new Map()); + + expect(updated).toBeUndefined(); + }); + + test('buildUpdatedComposeFileObjectForValidation should normalize non-object service sections and entries', () => { + const updatedFromInvalidServices = trigger.buildUpdatedComposeFileObjectForValidation( + { version: '3.9', services: 'invalid' }, + new Map([['nginx', 'nginx:1.1.0']]), + ) as any; + const updatedFromScalarService = trigger.buildUpdatedComposeFileObjectForValidation( + { services: { nginx: 'legacy' } }, + new Map([['nginx', 'nginx:1.1.0']]), + ) as any; + + expect(updatedFromInvalidServices.services).toEqual({ + nginx: { image: 'nginx:1.1.0' }, + }); + expect(updatedFromScalarService.services.nginx).toEqual({ + image: 'nginx:1.1.0', + }); + }); + test('reconcileComposeMappings should no-op when reconciliation mode is off', () => { trigger.configuration.reconciliationMode = 'off'; @@ -4630,6 +4931,40 @@ describe('Dockercompose Trigger', () => { ); }); + test('buildPerformContainerUpdateOptions should compose options without duplicate spread logic', () => { + const runtimeContext = { + dockerApi: mockDockerApi, + auth: { from: 'context' }, + newImage: 'nginx:9.9.9', + registry: getState().registry.hub, + }; + + const options = (trigger as any).buildPerformContainerUpdateOptions( + { + composeFiles: ['/opt/drydock/test/stack.yml', '/opt/drydock/test/stack.override.yml'], + skipPull: true, + }, + runtimeContext, + ); + + expect(options).toEqual({ + composeFiles: ['/opt/drydock/test/stack.yml', '/opt/drydock/test/stack.override.yml'], + skipPull: true, + runtimeContext, + }); + }); + + test('buildPerformContainerUpdateOptions should omit runtime context and compose chain when not needed', () => { + const options = 
(trigger as any).buildPerformContainerUpdateOptions( + { + composeFiles: ['/opt/drydock/test/stack.yml'], + }, + {}, + ); + + expect(options).toEqual({}); + }); + test('performContainerUpdate should pass compose chain to per-service update', async () => { trigger.configuration.dryrun = false; const container = makeContainer({ diff --git a/app/triggers/providers/dockercompose/Dockercompose.ts b/app/triggers/providers/dockercompose/Dockercompose.ts index e32ca2cb..3b47b2e0 100644 --- a/app/triggers/providers/dockercompose/Dockercompose.ts +++ b/app/triggers/providers/dockercompose/Dockercompose.ts @@ -3,6 +3,7 @@ import fs from 'node:fs/promises'; import path from 'node:path'; import yaml, { type Pair, type ParsedNode } from 'yaml'; import type { ContainerImage } from '../../../model/container.js'; +import type Registry from '../../../registries/Registry.js'; import { getState } from '../../../registry/index.js'; import { resolveConfiguredPath, resolveConfiguredPathWithinBase } from '../../../runtime/paths.js'; import { sleep } from '../../../util/sleep.js'; @@ -15,6 +16,12 @@ const COMPOSE_RENAME_RETRY_MS = 200; const COMPOSE_PROJECT_LABEL = 'com.docker.compose.project'; const COMPOSE_PROJECT_CONFIG_FILES_LABEL = 'com.docker.compose.project.config_files'; const COMPOSE_PROJECT_WORKING_DIR_LABEL = 'com.docker.compose.project.working_dir'; +const COMPOSE_DIRECTORY_FILE_CANDIDATES = [ + 'compose.yaml', + 'compose.yml', + 'docker-compose.yaml', + 'docker-compose.yml', +]; const COMPOSE_CACHE_MAX_ENTRIES = 256; const POST_START_ENVIRONMENT_KEY_PATTERN = /^[A-Za-z_][A-Za-z0-9_]*$/; const SELF_CONTAINER_IDENTIFIER_PATTERN = /^[a-zA-Z0-9][a-zA-Z0-9_.-]*$/; @@ -88,17 +95,42 @@ type RegistryImageContainerReference = { }; }; +type RegistryPullAuth = Awaited>; +type ComposeRuntimeContext = { + dockerApi?: unknown; + auth?: RegistryPullAuth; + newImage?: string; + registry?: unknown; +}; + +type ComposeUpdateLifecycleContext = { + composeFile: string; + service: string; + 
serviceDefinition?: unknown; + composeFiles?: string[]; + composeFileOnceApplied?: boolean; + skipPull?: boolean; + runtimeContext?: ComposeRuntimeContext; +}; + +type ComposeRuntimeUpdateMapping = { + service: string; + container: ComposeContainerReference & + RuntimeUpdateContainerReference & + RegistryImageContainerReference; +}; + type ComposeRuntimeRefreshOptions = { shouldStart?: boolean; skipPull?: boolean; forceRecreate?: boolean; composeFiles?: string[]; - runtimeContext?: { - dockerApi?: unknown; - auth?: unknown; - newImage?: string; - registry?: unknown; - }; + runtimeContext?: ComposeRuntimeContext; +}; + +type ValidateComposeConfigurationOptions = { + composeFiles?: string[]; + parsedComposeFileObject?: unknown; }; function getDockerApiFromWatcher(watcher: unknown): DockerApiLike | undefined { @@ -174,6 +206,35 @@ function normalizeImplicitLatest(image) { return `${image}:latest`; } +function hasExplicitRegistryHost(imageReference: string): boolean { + if (!imageReference) { + return false; + } + const referenceWithoutDigest = imageReference.split('@')[0]; + const firstSlashIndex = referenceWithoutDigest.indexOf('/'); + if (firstSlashIndex < 0) { + return false; + } + const firstSegment = referenceWithoutDigest.slice(0, firstSlashIndex); + return firstSegment.includes('.') || firstSegment.includes(':') || firstSegment === 'localhost'; +} + +function preserveExplicitDockerIoPrefix( + currentComposeImage: string | null | undefined, + targetImageReference: string, +): string { + if (!targetImageReference || typeof currentComposeImage !== 'string') { + return targetImageReference; + } + if (!/^docker\.io\//i.test(currentComposeImage.trim())) { + return targetImageReference; + } + if (hasExplicitRegistryHost(targetImageReference)) { + return targetImageReference; + } + return `docker.io/${targetImageReference}`; +} + function normalizePostStartHooks(postStart) { if (!postStart) { return []; @@ -441,16 +502,33 @@ class Dockercompose extends Docker { */ 
getConfigurationSchema() { const schemaDocker = super.getConfigurationSchema(); - return schemaDocker.append({ - // Make file optional since we now support per-container compose files - file: this.joi.string().optional(), - backup: this.joi.boolean().default(false), - // Add configuration for the label name to look for - composeFileLabel: this.joi.string().default('dd.compose.file'), - reconciliationMode: this.joi.string().valid('warn', 'block', 'off').default('warn'), - digestPinning: this.joi.boolean().default(false), - composeFileOnce: this.joi.boolean().default(false), - }); + return schemaDocker + .append({ + // Make file optional since we now support per-container compose files + file: this.joi.string().optional(), + backup: this.joi.boolean().default(false), + // Add configuration for the label name to look for + composeFileLabel: this.joi.string().default('dd.compose.file'), + reconciliationMode: this.joi.string().valid('warn', 'block', 'off').default('warn'), + digestPinning: this.joi.boolean().default(false), + composeFileOnce: this.joi.boolean().default(false), + }) + .rename('composefilelabel', 'composeFileLabel', { + ignoreUndefined: true, + override: true, + }) + .rename('reconciliationmode', 'reconciliationMode', { + ignoreUndefined: true, + override: true, + }) + .rename('digestpinning', 'digestPinning', { + ignoreUndefined: true, + override: true, + }) + .rename('composefileonce', 'composeFileOnce', { + ignoreUndefined: true, + override: true, + }); } async initTrigger() { @@ -793,7 +871,7 @@ class Dockercompose extends Docker { container.name, ); } catch (e) { - this.log.debug( + this.log.warn( `Unable to inspect compose labels for container ${container.name}; falling back to default compose file resolution (${e.message})`, ); return []; @@ -815,13 +893,49 @@ class Dockercompose extends Docker { return composeFilesFromInspect; } - const composeFileFromDefault = this.getDefaultComposeFilePath(); + const composeFileFromDefault = await 
this.resolveDefaultComposeFilePathForRuntime(); if (!composeFileFromDefault) { return []; } return [composeFileFromDefault]; } + async resolveComposeFilePathFromDirectory(composePath: string): Promise { + try { + const composePathStat = await fs.stat(composePath); + if (!composePathStat.isDirectory()) { + return composePath; + } + } catch { + // Keep existing behavior for missing/inaccessible files; downstream checks + // emit detailed does-not-exist/permission warnings. + return composePath; + } + + for (const composeFileCandidate of COMPOSE_DIRECTORY_FILE_CANDIDATES) { + const composeFileCandidatePath = path.join(composePath, composeFileCandidate); + try { + await fs.access(composeFileCandidatePath); + return composeFileCandidatePath; + } catch { + // try next candidate + } + } + + this.log.warn( + `Configured compose path ${composePath} is a directory and does not contain a compose file candidate (${COMPOSE_DIRECTORY_FILE_CANDIDATES.join(', ')})`, + ); + return null; + } + + async resolveDefaultComposeFilePathForRuntime(): Promise { + const composeFileFromDefault = this.getDefaultComposeFilePath(); + if (!composeFileFromDefault) { + return null; + } + return this.resolveComposeFilePathFromDirectory(composeFileFromDefault); + } + normalizeDigestPinningValue(value: unknown): string | null { if (!value || typeof value !== 'string') { return null; @@ -855,22 +969,22 @@ class Dockercompose extends Docker { getComposeMutationImageReference( container: RuntimeUpdateContainerReference, runtimeUpdateImage: string, + currentComposeImage?: string, ): string { - if (this.configuration.digestPinning !== true) { - return runtimeUpdateImage; - } - const digestPinningCandidate = - container?.result?.digest || - (container?.updateKind?.kind === 'digest' ? 
container?.updateKind?.remoteValue : undefined); - const digestToPin = this.normalizeDigestPinningValue(digestPinningCandidate); - if (!digestToPin) { - return runtimeUpdateImage; - } - const imageName = this.getImageNameFromReference(runtimeUpdateImage); - if (!imageName) { - return runtimeUpdateImage; + let composeMutationReference = runtimeUpdateImage; + if (this.configuration.digestPinning === true) { + const digestPinningCandidate = + container?.result?.digest || + (container?.updateKind?.kind === 'digest' ? container?.updateKind?.remoteValue : undefined); + const digestToPin = this.normalizeDigestPinningValue(digestPinningCandidate); + if (digestToPin) { + const imageName = this.getImageNameFromReference(runtimeUpdateImage); + if (imageName) { + composeMutationReference = `${imageName}@${digestToPin}`; + } + } } - return `${imageName}@${digestToPin}`; + return preserveExplicitDockerIoPrefix(currentComposeImage, composeMutationReference); } getContainerRuntimeImageReference(container: RegistryImageContainerReference): string { @@ -914,6 +1028,47 @@ class Dockercompose extends Docker { return serviceImageUpdates; } + buildUpdatedComposeFileObjectForValidation(composeFileObject, serviceImageUpdates) { + if ( + !composeFileObject || + typeof composeFileObject !== 'object' || + Array.isArray(composeFileObject) + ) { + return undefined; + } + + const composeFileRecord = composeFileObject as Record; + const existingServices = composeFileRecord.services; + const servicesRecord = + existingServices && typeof existingServices === 'object' && !Array.isArray(existingServices) + ? 
(existingServices as Record) + : {}; + const updatedServices = { ...servicesRecord }; + + for (const [serviceName, newImage] of serviceImageUpdates.entries()) { + const serviceDefinition = updatedServices[serviceName]; + if ( + serviceDefinition && + typeof serviceDefinition === 'object' && + !Array.isArray(serviceDefinition) + ) { + updatedServices[serviceName] = { + ...(serviceDefinition as Record), + image: newImage, + }; + continue; + } + updatedServices[serviceName] = { + image: newImage, + }; + } + + return { + ...composeFileRecord, + services: updatedServices, + }; + } + async getComposeFileChainAsObject(composeFiles, composeByFile = null) { const mergedCompose = { services: {}, @@ -1085,11 +1240,12 @@ class Dockercompose extends Docker { } } - async validateComposeConfiguration(composeFilePath, composeFileText, options = {}) { - const composeFileChain = this.normalizeComposeFileChain( - composeFilePath, - (options as { composeFiles?: string[] }).composeFiles, - ); + async validateComposeConfiguration( + composeFilePath, + composeFileText, + options: ValidateComposeConfigurationOptions = {}, + ) { + const composeFileChain = this.normalizeComposeFileChain(composeFilePath, options.composeFiles); const effectiveComposeFileChain = composeFileChain.includes(composeFilePath) ? 
composeFileChain : [...composeFileChain, composeFilePath]; @@ -1097,12 +1253,16 @@ class Dockercompose extends Docker { const composeByFile = new Map(); for (const composeFile of effectiveComposeFileChain) { if (composeFile === composeFilePath) { - composeByFile.set( - composeFile, - yaml.parse(composeFileText, { - maxAliasCount: YAML_MAX_ALIAS_COUNT, - }), - ); + if (options.parsedComposeFileObject !== undefined) { + composeByFile.set(composeFile, options.parsedComposeFileObject); + } else { + composeByFile.set( + composeFile, + yaml.parse(composeFileText, { + maxAliasCount: YAML_MAX_ALIAS_COUNT, + }), + ); + } continue; } composeByFile.set(composeFile, await this.getComposeFileAsObject(composeFile)); @@ -1115,14 +1275,15 @@ class Dockercompose extends Docker { } } - async mutateComposeFile(file, updateComposeText, options = {}) { + async mutateComposeFile( + file, + updateComposeText, + options: ValidateComposeConfigurationOptions = {}, + ) { return this.withComposeFileLock(file, async (filePath) => { const composeFileText = (await this.getComposeFile(filePath)).toString(); const composeFileStat = await fs.stat(filePath); - const composeFileChain = this.normalizeComposeFileChain( - filePath, - (options as { composeFiles?: string[] }).composeFiles, - ); + const composeFileChain = this.normalizeComposeFileChain(filePath, options.composeFiles); const updatedComposeFileText = updateComposeText(composeFileText, { filePath, mtimeMs: composeFileStat.mtimeMs, @@ -1130,12 +1291,21 @@ class Dockercompose extends Docker { if (updatedComposeFileText === composeFileText) { return false; } + const validationOptions: ValidateComposeConfigurationOptions = {}; if (composeFileChain.length > 1) { - await this.validateComposeConfiguration(filePath, updatedComposeFileText, { - composeFiles: composeFileChain, - }); - } else { + validationOptions.composeFiles = composeFileChain; + } + if (options.parsedComposeFileObject !== undefined) { + validationOptions.parsedComposeFileObject = 
options.parsedComposeFileObject; + } + if (Object.keys(validationOptions).length === 0) { await this.validateComposeConfiguration(filePath, updatedComposeFileText); + } else { + await this.validateComposeConfiguration( + filePath, + updatedComposeFileText, + validationOptions, + ); } await this.writeComposeFile(filePath, updatedComposeFileText); return true; @@ -1146,9 +1316,12 @@ class Dockercompose extends Docker { * Override: provide shared runtime dependencies once per lifecycle run. * Runtime container state is still resolved on demand per service refresh. */ - async createTriggerContext(container, logContainer, composeContext) { - const runtimeContext = (composeContext as { runtimeContext?: unknown } | undefined) - ?.runtimeContext as ComposeRuntimeRefreshOptions['runtimeContext'] | undefined; + async createTriggerContext( + container, + logContainer, + composeContext?: ComposeUpdateLifecycleContext, + ) { + const runtimeContext = composeContext?.runtimeContext; if ( runtimeContext?.dockerApi && runtimeContext?.registry && @@ -1184,13 +1357,16 @@ class Dockercompose extends Docker { * Override: apply compose-specific hooks while performing runtime refresh * through the Docker Engine API. 
*/ - async performContainerUpdate(context, container, _logContainer, composeCtx) { + async performContainerUpdate( + context, + container, + _logContainer, + composeCtx?: ComposeUpdateLifecycleContext, + ) { if (!composeCtx) { throw new Error(`Missing compose context for container ${container.name}`); } - const composeRuntimeContext = (composeCtx as { runtimeContext?: unknown })?.runtimeContext as - | ComposeRuntimeRefreshOptions['runtimeContext'] - | undefined; + const composeRuntimeContext = composeCtx.runtimeContext; const runtimeContext = { dockerApi: context?.dockerApi, auth: context?.auth, @@ -1198,11 +1374,10 @@ class Dockercompose extends Docker { registry: context?.registry, ...(composeRuntimeContext || {}), }; - const hasRuntimeContext = - runtimeContext.dockerApi !== undefined || - runtimeContext.auth !== undefined || - runtimeContext.newImage !== undefined || - runtimeContext.registry !== undefined; + const composeUpdateOptions = this.buildPerformContainerUpdateOptions( + composeCtx, + runtimeContext, + ); if (composeCtx.composeFileOnceApplied === true) { const logContainer = this.log.child({ @@ -1212,28 +1387,12 @@ class Dockercompose extends Docker { `Skip per-service compose refresh for ${composeCtx.service} because compose-file-once mode already refreshed ${composeCtx.composeFile}`, ); } else { - if (Array.isArray(composeCtx.composeFiles) && composeCtx.composeFiles.length > 1) { - await this.updateContainerWithCompose( - composeCtx.composeFile, - composeCtx.service, - container, - { - composeFiles: composeCtx.composeFiles, - ...(composeCtx.skipPull === true ? { skipPull: true } : {}), - ...(hasRuntimeContext ? { runtimeContext } : {}), - }, - ); - } else { - await this.updateContainerWithCompose( - composeCtx.composeFile, - composeCtx.service, - container, - { - ...(composeCtx.skipPull === true ? { skipPull: true } : {}), - ...(hasRuntimeContext ? 
{ runtimeContext } : {}), - }, - ); - } + await this.updateContainerWithCompose( + composeCtx.composeFile, + composeCtx.service, + container, + composeUpdateOptions, + ); } await this.runServicePostStartHooks( container, @@ -1244,6 +1403,33 @@ class Dockercompose extends Docker { return !this.configuration.dryrun; } + buildPerformContainerUpdateOptions( + composeCtx: ComposeUpdateLifecycleContext, + runtimeContext: ComposeRuntimeContext, + ): Pick { + const composeUpdateOptions = {} as Pick< + ComposeRuntimeRefreshOptions, + 'composeFiles' | 'skipPull' | 'runtimeContext' + >; + + if (Array.isArray(composeCtx.composeFiles) && composeCtx.composeFiles.length > 1) { + composeUpdateOptions.composeFiles = composeCtx.composeFiles; + } + if (composeCtx.skipPull === true) { + composeUpdateOptions.skipPull = true; + } + if ( + runtimeContext.dockerApi !== undefined || + runtimeContext.auth !== undefined || + runtimeContext.newImage !== undefined || + runtimeContext.registry !== undefined + ) { + composeUpdateOptions.runtimeContext = runtimeContext; + } + + return composeUpdateOptions; + } + /** * Keep compose dry-run side-effect free: no prune and no backup records. 
*/ @@ -1291,7 +1477,17 @@ class Dockercompose extends Docker { * @returns {Promise} */ async trigger(container) { - await this.triggerBatch([container]); + const triggerBatchResults = await this.triggerBatch([container]); + const hasRuntimeUpdates = triggerBatchResults.some((result) => result === true); + if ( + this.configuration.dryrun !== true && + container?.updateAvailable === true && + !hasRuntimeUpdates + ) { + throw new Error( + `No compose updates were applied for container ${container?.name || 'unknown'}`, + ); + } } async resolveAndGroupContainersByComposeFile( @@ -1299,6 +1495,25 @@ class Dockercompose extends Docker { configuredComposeFilePath: string | null, ): Promise> { const containersByComposeFile = new Map(); + const composeFileAccessErrorByPath = new Map(); + + const getComposeFileAccessError = async (composeFile: string): Promise => { + if (composeFileAccessErrorByPath.has(composeFile)) { + return composeFileAccessErrorByPath.get(composeFile) ?? null; + } + try { + await fs.access(composeFile); + composeFileAccessErrorByPath.set(composeFile, null); + return null; + } catch (e) { + const reason = + e.code === 'EACCES' + ? `permission denied (${ROOT_MODE_BREAK_GLASS_HINT})` + : 'does not exist'; + composeFileAccessErrorByPath.set(composeFile, reason); + return reason; + } + }; for (const container of containers) { // Filter on containers running on local host @@ -1319,27 +1534,26 @@ class Dockercompose extends Docker { continue; } if (configuredComposeFilePath && !composeFiles.includes(configuredComposeFilePath)) { - this.log.debug( + this.log.warn( `Skip container ${container.name} because compose files ${composeFiles.join(', ')} do not match configured file ${configuredComposeFilePath}`, ); continue; } let missingComposeFile = null as string | null; + let missingComposeFileReason = null as string | null; for (const composeFile of composeFiles) { - try { - await fs.access(composeFile); - } catch (e) { - const reason = - e.code === 'EACCES' - ? 
`permission denied (${ROOT_MODE_BREAK_GLASS_HINT})` - : 'does not exist'; - this.log.warn(`Compose file ${composeFile} for container ${container.name} ${reason}`); + const composeFileAccessError = await getComposeFileAccessError(composeFile); + if (composeFileAccessError) { missingComposeFile = composeFile; + missingComposeFileReason = composeFileAccessError; break; } } if (missingComposeFile) { + this.log.warn( + `Compose file ${missingComposeFile} for container ${container.name} ${missingComposeFileReason}`, + ); continue; } @@ -1364,17 +1578,21 @@ class Dockercompose extends Docker { /** * Update the docker-compose stack. * @param containers the containers - * @returns {Promise} + * @returns {Promise} */ - async triggerBatch(containers): Promise { - const configuredComposeFilePath = this.getDefaultComposeFilePath(); + async triggerBatch(containers): Promise { + const configuredComposeFilePath = await this.resolveDefaultComposeFilePathForRuntime(); const containersByComposeFile = await this.resolveAndGroupContainersByComposeFile( containers, configuredComposeFilePath, ); + if (containersByComposeFile.size === 0) { + this.log.warn('No containers matched any compose file for this trigger'); + } + // Process each compose file group - const batchResults: unknown[] = []; + const batchResults: boolean[] = []; for (const { composeFile, composeFiles, @@ -1391,13 +1609,55 @@ class Dockercompose extends Docker { return batchResults; } + private async buildComposeFileOnceRuntimeContextByService( + mappingsNeedingRuntimeUpdate: ComposeRuntimeUpdateMapping[], + ): Promise>> { + const composeFileOnceRuntimeContextByService = new Map< + string, + NonNullable + >(); + const firstContainerByService = new Map(); + for (const mapping of mappingsNeedingRuntimeUpdate) { + if (!firstContainerByService.has(mapping.service)) { + firstContainerByService.set(mapping.service, mapping); + } + } + await Promise.all( + [...firstContainerByService.entries()].map(async ([service, mapping]) => { 
+ const runtimeContainer = mapping.container; + const logContainer = this.log.child({ + container: runtimeContainer.name, + }); + const watcher = this.getWatcher(runtimeContainer); + const { dockerApi } = watcher; + const registry = this.resolveRegistryManager(runtimeContainer, logContainer, { + allowAnonymousFallback: true, + }); + const auth = await registry.getAuthPull(); + const newImage = this.getNewImageFullName(registry, runtimeContainer); + composeFileOnceRuntimeContextByService.set(service, { + dockerApi, + registry, + auth, + newImage, + }); + await this.pullImage(dockerApi, auth, newImage, logContainer); + }), + ); + return composeFileOnceRuntimeContextByService; + } + /** * Process a specific compose file with its associated containers. * @param composeFile * @param containers - * @returns {Promise} + * @returns {Promise} true if runtime updates were applied, false otherwise */ - async processComposeFile(composeFile, containers, composeFiles = [composeFile]) { + async processComposeFile( + composeFile, + containers, + composeFiles = [composeFile], + ): Promise { const composeFileChain = this.normalizeComposeFileChain(composeFile, composeFiles); const composeFileChainSummary = composeFileChain.join(', '); this.log.info(`Processing compose file: ${composeFileChainSummary}`); @@ -1408,13 +1668,19 @@ class Dockercompose extends Docker { const compose = await this.getComposeFileChainAsObject(composeFileChain, composeByFile); // Filter containers that belong to this compose file - const containersFiltered = containers.filter((container) => - doesContainerBelongToCompose(compose, container), - ); + const containersFiltered = containers.filter((container) => { + const belongs = doesContainerBelongToCompose(compose, container); + if (!belongs) { + this.log.warn( + `Container ${container.name} not found in compose file ${composeFileChainSummary} (image mismatch)`, + ); + } + return belongs; + }); if (containersFiltered.length === 0) { this.log.warn(`No containers 
found in compose file ${composeFileChainSummary}`); - return; + return false; } // [{ container, current: '1.0.0', update: '2.0.0' }, {...}] @@ -1425,7 +1691,11 @@ class Dockercompose extends Docker { return undefined; } const runtimeImage = this.getContainerRuntimeImageReference(container); - const composeUpdate = this.getComposeMutationImageReference(container, map.update); + const composeUpdate = this.getComposeMutationImageReference( + container, + map.update, + map.current, + ); return { container, runtimeImage, @@ -1453,8 +1723,10 @@ class Dockercompose extends Docker { ); if (mappingsNeedingRuntimeUpdate.length === 0) { - this.log.info(`All containers in ${composeFileChainSummary} are already up to date`); - return; + this.log.info( + `All containers in ${composeFileChainSummary} are already up to date (checked: ${versionMappings.map((m) => m.container.name).join(', ') || 'none'})`, + ); + return false; } // Dry-run? @@ -1480,6 +1752,10 @@ class Dockercompose extends Docker { // Replace only the targeted compose service image values. 
const serviceImageUpdates = this.buildComposeServiceImageUpdates(composeUpdates); + const parsedComposeFileObject = this.buildUpdatedComposeFileObjectForValidation( + composeByFile.get(writableComposeFile), + serviceImageUpdates, + ); await this.mutateComposeFile( writableComposeFile, (composeFileText, composeFileMetadata) => @@ -1494,6 +1770,7 @@ class Dockercompose extends Docker { ), { composeFiles: composeFileChain, + parsedComposeFileObject, }, ); } @@ -1502,43 +1779,9 @@ class Dockercompose extends Docker { const composeFileOnceHandledServices = new Set(); const composeFileOnceEnabled = this.configuration.composeFileOnce === true && this.configuration.dryrun !== true; - const composeFileOnceRuntimeContextByService = new Map< - string, - NonNullable - >(); - if (composeFileOnceEnabled) { - const firstContainerByService = new Map< - string, - (typeof mappingsNeedingRuntimeUpdate)[number] - >(); - for (const mapping of mappingsNeedingRuntimeUpdate) { - if (!firstContainerByService.has(mapping.service)) { - firstContainerByService.set(mapping.service, mapping); - } - } - await Promise.all( - [...firstContainerByService.entries()].map(async ([service, mapping]) => { - const runtimeContainer = mapping.container; - const logContainer = this.log.child({ - container: runtimeContainer.name, - }); - const watcher = this.getWatcher(runtimeContainer); - const { dockerApi } = watcher; - const registry = this.resolveRegistryManager(runtimeContainer, logContainer, { - allowAnonymousFallback: true, - }); - const auth = await registry.getAuthPull(); - const newImage = this.getNewImageFullName(registry, runtimeContainer); - composeFileOnceRuntimeContextByService.set(service, { - dockerApi, - registry, - auth, - newImage, - }); - await this.pullImage(dockerApi, auth, newImage, logContainer); - }), - ); - } + const composeFileOnceRuntimeContextByService = composeFileOnceEnabled + ? 
await this.buildComposeFileOnceRuntimeContextByService(mappingsNeedingRuntimeUpdate) + : new Map>(); // Refresh all containers requiring a runtime update via the shared // lifecycle orchestrator (security gate, hooks, prune/backup, events). @@ -1563,6 +1806,7 @@ class Dockercompose extends Docker { composeFileOnceHandledServices.add(service); } } + return true; } async resolveComposeServiceContext(container, currentImage) { @@ -1609,7 +1853,7 @@ class Dockercompose extends Docker { const currentServiceImage = mapping?.current || (compose as Record)?.services?.[service]?.image; const targetServiceImage = mapping - ? this.getComposeMutationImageReference(container, mapping.update) + ? this.getComposeMutationImageReference(container, mapping.update, currentServiceImage) : preview.newImage; const composePreview = { files: composeFiles, @@ -2068,6 +2312,7 @@ class Dockercompose extends Docker { export default Dockercompose; export { + hasExplicitRegistryHost as testable_hasExplicitRegistryHost, normalizeImplicitLatest as testable_normalizeImplicitLatest, normalizePostStartHooks as testable_normalizePostStartHooks, normalizePostStartEnvironmentValue as testable_normalizePostStartEnvironmentValue, diff --git a/app/triggers/providers/mqtt/Hass.test.ts b/app/triggers/providers/mqtt/Hass.test.ts index 394b8d73..28444070 100644 --- a/app/triggers/providers/mqtt/Hass.test.ts +++ b/app/triggers/providers/mqtt/Hass.test.ts @@ -6,6 +6,7 @@ import { registerWatcherStop, } from '../../../event/index.js'; import log from '../../../log/index.js'; +import * as containerStore from '../../../store/container.js'; import Hass from './Hass.js'; const MOCK_VERSION = '1.4.0-test'; @@ -95,7 +96,7 @@ test('publishDiscoveryMessage must publish a discovery message expected by HA', }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', 
state_topic: 'my/state', myOption: true, }), @@ -124,7 +125,7 @@ test('addContainerSensor must publish sensor discovery message expected by HA', }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/watcher-name/container-name', force_update: true, value_template: '{{ value_json.image_tag_value }}', @@ -137,6 +138,114 @@ test('addContainerSensor must publish sensor discovery message expected by HA', ); }); +test.each([ + { + displayIcon: 'sh:nextcloud', + expectedPicture: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/nextcloud.png', + }, + { + displayIcon: 'hl:nextcloud', + expectedPicture: 'https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/png/nextcloud.png', + }, + { + displayIcon: 'si:nextcloud', + expectedPicture: 'https://cdn.jsdelivr.net/npm/simple-icons@latest/icons/nextcloud.svg', + }, + { + displayIcon: 'sh: ', + expectedPicture: + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', + }, +])('addContainerSensor should map $displayIcon to entity_picture URL', async ({ + displayIcon, + expectedPicture, +}) => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon, + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe(expectedPicture); +}); + +test('addContainerSensor should use direct URL icon as entity_picture', async () => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'https://example.com/custom/icon.png', + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe('https://example.com/custom/icon.png'); 
+}); + +test('addContainerSensor should strip file extension from icon slug', async () => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'sh:nextcloud.png', + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe( + 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/nextcloud.png', + ); +}); + +test('addContainerSensor should ignore empty dd.display.picture', async () => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'sh:nextcloud', + labels: { + 'dd.display.picture': ' ', + }, + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe( + 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/nextcloud.png', + ); +}); + +test('addContainerSensor should ignore non-URL dd.display.picture', async () => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'sh:nextcloud', + labels: { + 'dd.display.picture': 'not-a-url', + }, + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe( + 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/nextcloud.png', + ); +}); + +test('addContainerSensor should prefer dd.display.picture over icon-derived entity_picture', async () => { + await hass.addContainerSensor({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'sh:nextcloud', + labels: { + 'dd.display.picture': 'https://images.example.com/nextcloud.png', + }, + }); + + const discoveryCall = mqttClientMock.publish.mock.calls[0]; + const discoveryPayload = JSON.parse(discoveryCall[1]); + expect(discoveryPayload.entity_picture).toBe('https://images.example.com/nextcloud.png'); 
+}); + test.each( containerData, )('removeContainerSensor must publish sensor discovery message expected by HA', async ({ @@ -180,7 +289,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/total_count', }), { retain: true }, @@ -202,7 +311,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/update_count', }), { retain: true }, @@ -224,7 +333,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/update_status', payload_on: 'true', payload_off: 'false', @@ -248,7 +357,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/watcher-name/total_count', }), { retain: true }, @@ -270,7 +379,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 
'topic/watcher-name/update_count', }), { retain: true }, @@ -292,7 +401,7 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/watcher-name/update_status', payload_on: 'true', payload_off: 'false', @@ -347,6 +456,26 @@ test.each(containerData)('updateContainerSensors must publish all sensors expect ); }); +test('updateContainerSensors should use container count queries instead of full list cloning', async () => { + const getContainersSpy = vi.spyOn(containerStore, 'getContainers'); + const getContainerCountSpy = vi.spyOn(containerStore, 'getContainerCount'); + + await hass.updateContainerSensors({ + name: 'container-name', + watcher: 'watcher-name', + displayIcon: 'mdi:docker', + }); + + expect(getContainerCountSpy).toHaveBeenCalledWith(); + expect(getContainerCountSpy).toHaveBeenCalledWith({ updateAvailable: true }); + expect(getContainerCountSpy).toHaveBeenCalledWith({ watcher: 'watcher-name' }); + expect(getContainerCountSpy).toHaveBeenCalledWith({ + watcher: 'watcher-name', + updateAvailable: true, + }); + expect(getContainersSpy).not.toHaveBeenCalled(); +}); + test.each( containerData, )('removeContainerSensor must publish all sensor removal messages expected by HA', async ({ @@ -385,7 +514,7 @@ test('updateWatcherSensors must publish all watcher sensor messages expected by }, icon: 'mdi:docker', entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png', state_topic: 'topic/watcher-name/running', payload_on: 'true', payload_off: 'false', diff --git a/app/triggers/providers/mqtt/Hass.ts b/app/triggers/providers/mqtt/Hass.ts index 800c0f90..2d3453fb 100644 --- 
a/app/triggers/providers/mqtt/Hass.ts +++ b/app/triggers/providers/mqtt/Hass.ts @@ -14,6 +14,8 @@ const HASS_MANUFACTURER = 'drydock'; const HASS_ENTITY_VALUE_TEMPLATE = '{{ value_json.image_tag_value }}'; const HASS_LATEST_VERSION_TEMPLATE = '{% if value_json.update_kind_kind == "digest" %}{{ value_json.result_digest[:15] }}{% else %}{{ value_json.result_tag }}{% endif %}'; +const HASS_DEFAULT_ENTITY_PICTURE = + 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/whale-logo.png'; interface HassClient { publish: ( @@ -66,13 +68,79 @@ function getHaDevice() { * @return {*} */ function sanitizeIcon(icon) { - return icon - .replaceAll('mdi-', 'mdi:') - .replaceAll('fa-', 'fa:') - .replaceAll('fab-', 'fab:') - .replaceAll('far-', 'far:') - .replaceAll('fas-', 'fas:') - .replaceAll('si-', 'si:'); + if (typeof icon !== 'string') { + return ''; + } + const normalized = icon.trim(); + if (!normalized || normalized.startsWith('http://') || normalized.startsWith('https://')) { + return normalized; + } + return normalized + .replace(/^mdi-/i, 'mdi:') + .replace(/^fa-/i, 'fa:') + .replace(/^fab-/i, 'fab:') + .replace(/^far-/i, 'far:') + .replace(/^fas-/i, 'fas:') + .replace(/^hl-/i, 'hl:') + .replace(/^sh-/i, 'sh:') + .replace(/^si-/i, 'si:'); +} + +function normalizeIconSlug(slug: string, extension: string): string { + const normalizedSlug = slug.trim().toLowerCase(); + const suffix = `.${extension}`; + if (normalizedSlug.endsWith(suffix)) { + return normalizedSlug.slice(0, -suffix.length); + } + return normalizedSlug; +} + +function resolveEntityPicture(icon?: string): string { + const sanitizedIcon = sanitizeIcon(icon); + if (!sanitizedIcon) { + return HASS_DEFAULT_ENTITY_PICTURE; + } + if (sanitizedIcon.startsWith('http://') || sanitizedIcon.startsWith('https://')) { + return sanitizedIcon; + } + + const iconMatch = sanitizedIcon.match(/^(sh|hl|si):(.+)$/i); + if (!iconMatch) { + return HASS_DEFAULT_ENTITY_PICTURE; + } + + const provider = 
iconMatch[1].toLowerCase(); + const rawSlug = iconMatch[2]; + const cdnMap: Record = { + sh: { ext: 'png', base: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png' }, + hl: { ext: 'png', base: 'https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/png' }, + si: { ext: 'svg', base: 'https://cdn.jsdelivr.net/npm/simple-icons@latest/icons' }, + }; + // Provider is guaranteed to be sh|hl|si by the regex above + const cdn = cdnMap[provider]; + const slug = normalizeIconSlug(rawSlug, cdn.ext); + return `${cdn.base}/${slug}.${cdn.ext}`; +} + +function resolveEntityPictureOverride(container: { + displayPicture?: string; + labels?: Record; +}): string | undefined { + const configuredPicture = + container.displayPicture || + container.labels?.['dd.display.picture'] || + container.labels?.['wud.display.picture']; + if (typeof configuredPicture !== 'string') { + return undefined; + } + const normalized = configuredPicture.trim(); + if (!normalized) { + return undefined; + } + if (!normalized.startsWith('http://') && !normalized.startsWith('https://')) { + return undefined; + } + return normalized; } class Hass { @@ -148,6 +216,7 @@ class Hass { kind: 'update', topic: this.getContainerStateTopic({ container }), }; + const entityPictureOverride = resolveEntityPictureOverride(container); this.log.info(`Add hass container update sensor [${containerStateSensor.topic}]`); if (this.configuration.hass.discovery) { await this.publishDiscoveryMessage({ @@ -159,6 +228,7 @@ class Hass { stateTopic: containerStateSensor.topic, name: container.displayName, icon: sanitizeIcon(container.displayIcon), + entityPicture: entityPictureOverride, options: { force_update: true, value_template: HASS_ENTITY_VALUE_TEMPLATE, @@ -296,19 +366,19 @@ class Hass { } // Count all containers - const totalCount = containerStore.getContainers().length; - const updateCount = containerStore.getContainers({ + const totalCount = containerStore.getContainerCount(); + const updateCount = 
containerStore.getContainerCount({ updateAvailable: true, - }).length; + }); // Count all containers belonging to the current watcher - const watcherTotalCount = containerStore.getContainers({ + const watcherTotalCount = containerStore.getContainerCount({ watcher: container.watcher, - }).length; - const watcherUpdateCount = containerStore.getContainers({ + }); + const watcherUpdateCount = containerStore.getContainerCount({ watcher: container.watcher, updateAvailable: true, - }).length; + }); // Publish sensors await this.updateSensor({ @@ -388,6 +458,7 @@ class Hass { * @param kind * @param name * @param icon + * @param entityPicture * @param options * @returns {Promise<*>} */ @@ -397,6 +468,7 @@ class Hass { kind, name, icon, + entityPicture, options = {}, }: { discoveryTopic: string; @@ -404,6 +476,7 @@ class Hass { kind: string; name: string; icon?: string; + entityPicture?: string; options?: Record; }) { const entityId = getHassEntityId(stateTopic); @@ -415,8 +488,7 @@ class Hass { name: name || entityId, device: getHaDevice(), icon: icon || sanitizeIcon('mdi:docker'), - entity_picture: - 'https://raw.githubusercontent.com/CodesWhat/drydock/main/docs/assets/drydock.png', + entity_picture: entityPicture || resolveEntityPicture(icon), state_topic: stateTopic, ...options, }), diff --git a/app/triggers/providers/mqtt/Mqtt.test.ts b/app/triggers/providers/mqtt/Mqtt.test.ts index fbf20f30..21606958 100644 --- a/app/triggers/providers/mqtt/Mqtt.test.ts +++ b/app/triggers/providers/mqtt/Mqtt.test.ts @@ -31,7 +31,7 @@ const configurationValid = { discovery: false, enabled: false, prefix: 'homeassistant', - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -115,7 +115,7 @@ test('validateConfiguration should default hass.discovery to true when hass.enab enabled: true, prefix: 'homeassistant', discovery: true, - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -165,7 +165,7 @@ test('initTrigger should init 
Mqtt client', async () => { enabled: true, discovery: true, prefix: 'homeassistant', - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -248,7 +248,7 @@ test('initTrigger should read TLS files when configured', async () => { enabled: false, discovery: false, prefix: 'homeassistant', - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -306,7 +306,7 @@ test('initTrigger should execute registered container event callbacks', async () enabled: false, discovery: false, prefix: 'homeassistant', - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -335,7 +335,7 @@ test('deregister then initTrigger should not duplicate container event callbacks enabled: false, discovery: false, prefix: 'homeassistant', - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -367,12 +367,12 @@ describe('hass.attributes validation', () => { expect(validated.hass.attributes).toBe('short'); }); - test('should default hass.attributes to full', () => { + test('should default hass.attributes to short', () => { const validated = mqtt.validateConfiguration({ url: configurationValid.url, clientid: 'dd', }); - expect(validated.hass.attributes).toBe('full'); + expect(validated.hass.attributes).toBe('short'); }); test('should reject invalid hass.attributes value', () => { @@ -601,7 +601,7 @@ describe('trigger filtering', () => { expect(publishedPayload).toHaveProperty('labels_com_docker_compose_project', 'app'); }); - test('should default hass.attributes to full when not provided in runtime config', async () => { + test('should default hass.attributes to short when not provided in runtime config', async () => { mqtt.configuration = { topic: 'dd/container', exclude: '', @@ -615,7 +615,8 @@ describe('trigger filtering', () => { await mqtt.trigger(containerWithSecurity); const publishedPayload = JSON.parse(mqtt.client.publish.mock.calls[0][1]); - 
expect(publishedPayload).toHaveProperty('security_scan_vulnerabilities_0_id', 'CVE-2024-0001'); - expect(publishedPayload).toHaveProperty('details_ports_0', '80/tcp'); + expect(publishedPayload).not.toHaveProperty('security_scan_vulnerabilities_0_id'); + expect(publishedPayload).not.toHaveProperty('details_ports_0'); + expect(publishedPayload).toHaveProperty('name', 'filtered-test'); }); }); diff --git a/app/triggers/providers/mqtt/Mqtt.ts b/app/triggers/providers/mqtt/Mqtt.ts index 50e80ca9..e53cc70c 100644 --- a/app/triggers/providers/mqtt/Mqtt.ts +++ b/app/triggers/providers/mqtt/Mqtt.ts @@ -86,7 +86,7 @@ class Mqtt extends Trigger { enabled: false, prefix: hassDefaultPrefix, discovery: false, - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -144,7 +144,7 @@ class Mqtt extends Trigger { attributes: this.joi .string() .valid(...HASS_ATTRIBUTE_PRESET_VALUES) - .default('full'), + .default('short'), filter: this.joi .object({ include: this.joi.string().allow('').default(''), @@ -159,7 +159,7 @@ class Mqtt extends Trigger { enabled: false, prefix: hassDefaultPrefix, discovery: false, - attributes: 'full', + attributes: 'short', filter: { include: '', exclude: '', @@ -286,7 +286,7 @@ class Mqtt extends Trigger { return { mode: 'exclude', stage: 'container', - paths: HASS_ATTRIBUTE_PRESETS[this.configuration.hass?.attributes ?? 'full'], + paths: HASS_ATTRIBUTE_PRESETS[this.configuration.hass?.attributes ?? 
'short'], }; } diff --git a/app/watchers/providers/docker/docker-image-details-orchestration.test.ts b/app/watchers/providers/docker/docker-image-details-orchestration.test.ts index c0622abf..9bd47d26 100644 --- a/app/watchers/providers/docker/docker-image-details-orchestration.test.ts +++ b/app/watchers/providers/docker/docker-image-details-orchestration.test.ts @@ -539,4 +539,35 @@ describe('docker image details orchestration module', () => { expect(getContainersSpy).toHaveBeenCalledWith({ watcher: 'docker-test', name: 'service' }); expect(deleteContainerSpy).toHaveBeenCalledWith('old-container-id'); }); + + test('skips same-name dedupe when the discovered container name is empty', async () => { + vi.spyOn(storeContainer, 'getContainer').mockReturnValue(undefined); + const getContainersSpy = vi.spyOn(storeContainer, 'getContainers').mockReturnValue([ + { + id: 'old-container-id', + watcher: 'docker-test', + name: '', + } as any, + ]); + const deleteContainerSpy = vi + .spyOn(storeContainer, 'deleteContainer') + .mockImplementation(() => {}); + + const { watcher } = createWatcher(); + + const result = await addImageDetailsToContainerOrchestration( + watcher as any, + createDockerSummaryContainer({ + Id: 'new-container-id', + Names: [], + }), + {}, + createHelpers() as any, + ); + + expect(result?.id).toBe('new-container-id'); + expect(result?.name).toBe(''); + expect(getContainersSpy).not.toHaveBeenCalled(); + expect(deleteContainerSpy).not.toHaveBeenCalled(); + }); }); diff --git a/app/watchers/providers/docker/docker-image-details-orchestration.ts b/app/watchers/providers/docker/docker-image-details-orchestration.ts index 58076599..9bddc461 100644 --- a/app/watchers/providers/docker/docker-image-details-orchestration.ts +++ b/app/watchers/providers/docker/docker-image-details-orchestration.ts @@ -344,12 +344,14 @@ export async function addImageDetailsToContainerOrchestration( updateAvailable: false, updateKind: { kind: 'unknown' }, } as Container); - const 
containersWithSameName = storeContainer.getContainers({ - watcher: watcher.name, - name: containerToReturn.name, - }); - containersWithSameName - .filter((staleContainer) => staleContainer.id !== containerToReturn.id) - .forEach((staleContainer) => storeContainer.deleteContainer(staleContainer.id)); + if (typeof containerToReturn.name === 'string' && containerToReturn.name !== '') { + const containersWithSameName = storeContainer.getContainers({ + watcher: watcher.name, + name: containerToReturn.name, + }); + containersWithSameName + .filter((staleContainer) => staleContainer.id !== containerToReturn.id) + .forEach((staleContainer) => storeContainer.deleteContainer(staleContainer.id)); + } return containerToReturn; } diff --git a/apps/demo/.gitignore b/apps/demo/.gitignore new file mode 100644 index 00000000..e985853e --- /dev/null +++ b/apps/demo/.gitignore @@ -0,0 +1 @@ +.vercel diff --git a/apps/demo/public/mockServiceWorker.js b/apps/demo/public/mockServiceWorker.js index a255338c..5a021619 100644 --- a/apps/demo/public/mockServiceWorker.js +++ b/apps/demo/public/mockServiceWorker.js @@ -21,6 +21,10 @@ addEventListener('activate', (event) => { }); addEventListener('message', async (event) => { + if (event.origin !== self.location.origin) { + return; + } + const clientId = Reflect.get(event.source || {}, 'id'); if (!clientId || !self.clients) { diff --git a/apps/demo/src/main.ts b/apps/demo/src/main.ts index 3afa25b9..bd940f17 100644 --- a/apps/demo/src/main.ts +++ b/apps/demo/src/main.ts @@ -6,12 +6,25 @@ * 3. Boot the real Vue UI (imported from ../../ui/src via Vite alias) */ +import { DEFAULTS } from '@/preferences/schema'; import { FakeEventSource } from './mocks/sse'; // Patch EventSource BEFORE any UI code loads — the SSE service // creates an EventSource in AppLayout, so this must happen first. 
(globalThis as unknown as { EventSource: typeof FakeEventSource }).EventSource = FakeEventSource; +function getParentOrigin(): string | null { + if (!document.referrer) { + return null; + } + + try { + return new URL(document.referrer).origin; + } catch { + return null; + } +} + async function boot() { // Start MSW — must be running before the UI makes any fetch() calls const { worker } = await import('./mocks/browser'); @@ -23,12 +36,28 @@ async function boot() { // Import demo CSS for Tailwind @source directive await import('./demo.css'); + // Default demo theme to 'system' variant so it follows the user's OS + // light/dark preference, matching the surrounding website. + if (!localStorage.getItem('dd-preferences')) { + localStorage.setItem( + 'dd-preferences', + JSON.stringify({ + ...structuredClone(DEFAULTS), + theme: { family: 'one-dark', variant: 'system' }, + }), + ); + } + // Now boot the real UI await import('@/main'); // Tell the parent frame (website) we loaded successfully if (window.parent !== window) { - window.parent.postMessage({ type: 'drydock-demo-ready' }, '*'); + const parentOrigin = getParentOrigin(); + + if (parentOrigin) { + window.parent.postMessage({ type: 'drydock-demo-ready' }, parentOrigin); + } } // Auto-fill login credentials so demo visitors just click "Sign in". 
diff --git a/apps/demo/src/mocks/handlers/icons.ts b/apps/demo/src/mocks/handlers/icons.ts index 6282554b..923b3b52 100644 --- a/apps/demo/src/mocks/handlers/icons.ts +++ b/apps/demo/src/mocks/handlers/icons.ts @@ -48,6 +48,7 @@ export const iconHandlers = [ // Try primary provider let upstream = await tryFetch(config.url(slug)); + let usedDockerFallback = false; // Selfhst miss → try homarr fallback if (!upstream && provider === 'selfhst') { @@ -57,6 +58,7 @@ export const iconHandlers = [ // Still nothing → Docker icon as final fallback if (!upstream) { upstream = await tryFetch(DOCKER_FALLBACK_URL); + usedDockerFallback = upstream !== null; } if (!upstream) { @@ -69,7 +71,7 @@ export const iconHandlers = [ return new HttpResponse(buffer, { headers: { 'Content-Type': contentType, - 'Cache-Control': 'public, max-age=31536000, immutable', + 'Cache-Control': usedDockerFallback ? 'no-store' : 'public, max-age=31536000, immutable', }, }); }), diff --git a/apps/demo/src/mocks/sse.ts b/apps/demo/src/mocks/sse.ts index 03b61856..292c1b09 100644 --- a/apps/demo/src/mocks/sse.ts +++ b/apps/demo/src/mocks/sse.ts @@ -75,6 +75,9 @@ export class FakeEventSource { private dispatch(type: string, data: string): void { const event = new MessageEvent(type, { data }); + if (type === 'message') { + this.onmessage?.(event); + } this.listeners.get(type)?.forEach((fn) => fn(event)); } } diff --git a/apps/demo/vercel.json b/apps/demo/vercel.json index d3890290..0500e238 100644 --- a/apps/demo/vercel.json +++ b/apps/demo/vercel.json @@ -8,7 +8,6 @@ { "source": "/(.*)", "headers": [ - { "key": "X-Frame-Options", "value": "ALLOW-FROM https://drydock.codeswhat.com" }, { "key": "Content-Security-Policy", "value": "frame-ancestors 'self' https://drydock.codeswhat.com https://*.vercel.app" diff --git a/apps/web/app/page.tsx b/apps/web/app/page.tsx index ae3c200e..53eefb1f 100644 --- a/apps/web/app/page.tsx +++ b/apps/web/app/page.tsx @@ -234,11 +234,7 @@ const roadmap = [ status: "planned" as 
const, dotColor: "border-orange-400 bg-orange-50 text-orange-500 dark:border-orange-500 dark:bg-orange-950 dark:text-orange-400", - items: [ - "Notification templates", - "Release notes in notifications", - "MS Teams & Matrix triggers", - ], + items: ["Notification templates", "Release notes in notifications", "Deprecation removals"], }, { version: "v1.7.0", diff --git a/apps/web/components/demo-section.tsx b/apps/web/components/demo-section.tsx index 5ff619d0..80a62f3a 100644 --- a/apps/web/components/demo-section.tsx +++ b/apps/web/components/demo-section.tsx @@ -168,9 +168,23 @@ export function DemoSection() { }; if (navigator.share) { - await navigator.share(shareData); - } else { - await navigator.clipboard.writeText(shareData.url); + try { + await navigator.share(shareData); + return; + } catch (error) { + // Ignore user-cancelled share prompts. + if (error instanceof DOMException && error.name === "AbortError") { + return; + } + } + } + + if (navigator.clipboard?.writeText) { + try { + await navigator.clipboard.writeText(shareData.url); + } catch (error) { + console.warn("Failed to copy demo URL to clipboard", error); + } } } @@ -204,7 +218,11 @@ export function DemoSection() { {/* Action Buttons (inline only) */} {mode === "inline" && (
- @@ -261,7 +279,11 @@ export function DemoSection() {
- diff --git a/content/docs/current/changelog/index.mdx b/content/docs/current/changelog/index.mdx index fb2ef9bc..ad2f4d0f 100644 --- a/content/docs/current/changelog/index.mdx +++ b/content/docs/current/changelog/index.mdx @@ -13,6 +13,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- **Audit log for container state changes** — External container lifecycle events (start, stop, restart via Portainer or CLI) now generate `container-update` audit entries with the new status, so the audit log reflects all state changes, not just Drydock-initiated actions. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **mTLS client certificate support** — Registry providers now accept `CLIENTCERT` and `CLIENTKEY` options for mutual TLS authentication with private registries that require client certificates. + +### Fixed + +- **Log level setting had no effect** — `DD_LOG_LEVEL=debug` was correctly parsed but debug messages were silently dropped because pino's multistream destinations defaulted to `info` level. Stream destinations now inherit the configured log level. ([#134](https://github.com/CodesWhat/drydock/issues/134)) +- **Server feature flags not loaded after login** — Feature flags (`containeractions`, `delete`) were permanently stuck as disabled when authentication was required, because the pre-login bootstrap fetch failure marked the flags as "loaded" and never retried. Now failed fetches allow automatic retry after login. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger silently skips containers** — Multiple failure paths in the compose trigger were logged at `debug` level, making it nearly impossible to diagnose why a trigger reports success but containers don't update. 
Key diagnostic messages (compose file mismatch, label inspect failure, no containers matched) promoted to `warn` level, and the "already up to date" message now includes container names. ([#84](https://github.com/CodesWhat/drydock/discussions/84)) +- **Fallback icon cached permanently** — The Docker placeholder icon was served with `immutable` cache headers, causing browsers to cache it permanently even after the real provider icon becomes available. Fallback responses now use `no-store`. +- **Basic auth upgrade compatibility restored** — v1.4 now accepts legacy v1.3.9 Basic auth hashes (`{SHA}`, `$apr1$`/`$1$`, `crypt`, and plain fallback) to preserve smooth upgrades. Legacy formats remain deprecated and continue showing a migration banner, with removal still planned for v1.6.0. +- **Compose trigger rejects lowercase env var keys** — Configuration keys like `COMPOSEFILEONCE`, `DIGESTPINNING`, and `RECONCILIATIONMODE` were lowercased by the env parser but the Joi schema expected camelCase. Schema now maps lowercase keys to their camelCase equivalents. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger strips docker.io prefix** — When a compose file uses an explicit `docker.io/` registry prefix, compose mutations now preserve it instead of stripping it to a bare library path. ([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Compose trigger fails when FILE points to directory** — `DD_TRIGGER_DOCKERCOMPOSE_{name}_FILE` now accepts directories, automatically probing for `compose.yaml`, `compose.yml`, `docker-compose.yaml`, or `docker-compose.yml` inside the directory. ([#84](https://github.com/CodesWhat/drydock/discussions/84)) +- **Container healthcheck fails with TLS backend** — The Dockerfile healthcheck now detects `DD_SERVER_TLS_ENABLED=true` and switches to `curl --insecure https://` for self-signed certificates. Also skips the healthcheck entirely when `DD_SERVER_ENABLED=false`. 
([#120](https://github.com/CodesWhat/drydock/discussions/120)) +- **Agent CAFILE ignored without CERTFILE** — The agent subsystem now loads the CA certificate from `CAFILE` even when `CERTFILE` is not provided, fixing TLS verification for agents behind reverse proxies with custom CA chains. +- **Service worker accepts cross-origin postMessage** — The demo service worker now validates `postMessage` origins against the current host, preventing potential cross-origin message injection. + +### Changed + +- **MQTT HASS_ATTRIBUTES default changed to `short`** — The MQTT trigger `HASS_ATTRIBUTES` preset now defaults to `short` instead of `full`, excluding large SBOM documents, scan vulnerabilities, details, and labels from Home Assistant entity payloads. Users who need the full payload can set `DD_TRIGGER_MQTT_{name}_HASS_ATTRIBUTES=full` explicitly. +- **Basic auth argon2id PHC compatibility** — Basic authentication now accepts PHC-format argon2id hashes (`$argon2id$v=19$m=...,t=...,p=...$salt$hash`) in addition to the existing Drydock `argon2id$memory$passes$parallelism$salt$hash` format. Hash-generation docs now recommend the standard `argon2` CLI command first, with Node.js as a secondary option. 
+ ## [1.4.0] — 2026-02-28 ### Added diff --git a/content/docs/current/configuration/authentications/basic/index.mdx b/content/docs/current/configuration/authentications/basic/index.mdx index a0240010..960908ad 100644 --- a/content/docs/current/configuration/authentications/basic/index.mdx +++ b/content/docs/current/configuration/authentications/basic/index.mdx @@ -13,7 +13,7 @@ The `basic` authentication lets you protect drydock access using the [Http Basic | Env var | Required | Description | Supported values | Default value when missing | | --- | :---: | --- | --- | --- | | `DD_AUTH_BASIC_{auth_name}_USER` | 🔴 | Username | | | -| `DD_AUTH_BASIC_{auth_name}_HASH` | 🔴 | Argon2id password hash | `argon2id$memory$passes$parallelism$salt$hash` | | +| `DD_AUTH_BASIC_{auth_name}_HASH` | 🔴 | Argon2id password hash | `$argon2id$v=19$m=65536,t=3,p=4$salt$hash` (preferred) or `argon2id$memory$passes$parallelism$salt$hash` (compatible) | | Hash values contain `$` characters. In Docker Compose YAML, double each `$` as `$$`. In Bash, use single quotes around the value. 
@@ -58,28 +58,22 @@ docker run \ ## How to create a password hash -### Using the Drydock container - -The simplest approach — use the argon2id hasher built into the Drydock image: +### Using argon2 CLI (recommended) ```bash -docker run --rm codeswhat/drydock node -e ' - const c = require("node:crypto"); - const s = c.randomBytes(32); - const h = c.argon2Sync("argon2id", { message: process.argv[1], nonce: s, memory: 65536, passes: 3, parallelism: 4, tagLength: 64 }); - console.log("argon2id$65536$3$4$" + s.toString("base64") + "$" + h.toString("base64")); -' "yourpassword" +echo -n "yourpassword" | argon2 $(openssl rand -base64 32) -id -m 16 -t 3 -p 4 -l 64 -e ``` -### Using Node.js locally (requires Node 24+) +### Alternative: using Node.js locally (requires Node 24+, no argon2 CLI install) ```bash node -e ' const c = require("node:crypto"); + const toPhc = (b) => b.toString("base64").replace(/\+/g, "-").replace(/\//g, "_").replace(/=+$/, ""); const s = c.randomBytes(32); const h = c.argon2Sync("argon2id", { message: process.argv[1], nonce: s, memory: 65536, passes: 3, parallelism: 4, tagLength: 64 }); - console.log("argon2id$65536$3$4$" + s.toString("base64") + "$" + h.toString("base64")); + console.log("$argon2id$v=19$m=65536,t=3,p=4$" + toPhc(s) + "$" + toPhc(h)); ' "yourpassword" ``` -Legacy `{SHA}` hashes from WUD/htpasswd are still accepted but deprecated. They will be removed in v1.6.0. Use the commands above to generate an argon2id hash and update your `DD_AUTH_BASIC_*_HASH` values. +Legacy htpasswd hash formats from WUD/v1.3.x — `{SHA}` (SHA-1), `$apr1$` (Apache APR1-MD5), `$1$` (MD5-crypt), DES crypt, and plain text — are still accepted at runtime but deprecated. They will be removed in v1.6.0. Use the commands above to generate an argon2id hash and update your `DD_AUTH_BASIC_*_HASH` values. 
diff --git a/content/docs/current/configuration/registries/artifactory/index.mdx b/content/docs/current/configuration/registries/artifactory/index.mdx index b938e83b..3c41a628 100644 --- a/content/docs/current/configuration/registries/artifactory/index.mdx +++ b/content/docs/current/configuration/registries/artifactory/index.mdx @@ -19,6 +19,8 @@ The `artifactory` registry lets you configure a [JFrog Artifactory](https://jfro | `DD_REGISTRY_ARTIFACTORY_{REGISTRY_NAME}_AUTH` | ⚪ | Base64 encoded `login:password` string | DD_REGISTRY_ARTIFACTORY_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_ARTIFACTORY_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_ARTIFACTORY_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure (non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_ARTIFACTORY_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate file for mTLS | | | +| `DD_REGISTRY_ARTIFACTORY_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_ARTIFACTORY_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/registries/custom/index.mdx b/content/docs/current/configuration/registries/custom/index.mdx index 9782b337..32f385e5 100644 --- a/content/docs/current/configuration/registries/custom/index.mdx +++ b/content/docs/current/configuration/registries/custom/index.mdx @@ -19,6 +19,8 @@ The `custom` registry lets you configure a self-hosted [Docker Registry](https:/ | `DD_REGISTRY_CUSTOM_{REGISTRY_NAME}_AUTH` | ⚪ | Base64-encoded `login:password` string | DD_REGISTRY_CUSTOM_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_CUSTOM_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_CUSTOM_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure (non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_CUSTOM_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate 
file for mTLS | | | +| `DD_REGISTRY_CUSTOM_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_CUSTOM_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/registries/forgejo/index.mdx b/content/docs/current/configuration/registries/forgejo/index.mdx index 5ea62462..8323e77f 100644 --- a/content/docs/current/configuration/registries/forgejo/index.mdx +++ b/content/docs/current/configuration/registries/forgejo/index.mdx @@ -19,6 +19,8 @@ The `forgejo` registry lets you configure a self-hosted [Forgejo](https://forgej | `DD_REGISTRY_FORGEJO_{REGISTRY_NAME}_AUTH` | ⚪ | Base64-encoded `login:password` string | DD_REGISTRY_FORGEJO_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_FORGEJO_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_FORGEJO_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure (non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_FORGEJO_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate file for mTLS | | | +| `DD_REGISTRY_FORGEJO_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_FORGEJO_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/registries/gitea/index.mdx b/content/docs/current/configuration/registries/gitea/index.mdx index 6c8500b8..762292d1 100644 --- a/content/docs/current/configuration/registries/gitea/index.mdx +++ b/content/docs/current/configuration/registries/gitea/index.mdx @@ -19,6 +19,8 @@ The `gitea` registry lets you configure a self-hosted [Gitea](https://gitea.com) | `DD_REGISTRY_GITEA_{REGISTRY_NAME}_AUTH` | ⚪ | Base64-encoded `login:password` string | DD_REGISTRY_GITEA_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_GITEA_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_GITEA_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure 
(non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_GITEA_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate file for mTLS | | | +| `DD_REGISTRY_GITEA_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_GITEA_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/registries/harbor/index.mdx b/content/docs/current/configuration/registries/harbor/index.mdx index d674c49e..5b0cec3d 100644 --- a/content/docs/current/configuration/registries/harbor/index.mdx +++ b/content/docs/current/configuration/registries/harbor/index.mdx @@ -19,6 +19,8 @@ The `harbor` registry lets you configure a self-hosted [Harbor](https://goharbor | `DD_REGISTRY_HARBOR_{REGISTRY_NAME}_AUTH` | ⚪ | Base64 encoded `login:password` string | DD_REGISTRY_HARBOR_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_HARBOR_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_HARBOR_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure (non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_HARBOR_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate file for mTLS | | | +| `DD_REGISTRY_HARBOR_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_HARBOR_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/registries/nexus/index.mdx b/content/docs/current/configuration/registries/nexus/index.mdx index 8d30d443..2a61489f 100644 --- a/content/docs/current/configuration/registries/nexus/index.mdx +++ b/content/docs/current/configuration/registries/nexus/index.mdx @@ -19,6 +19,8 @@ The `nexus` registry lets you configure a [Sonatype Nexus](https://www.sonatype. 
| `DD_REGISTRY_NEXUS_{REGISTRY_NAME}_AUTH` | ⚪ | Base64 encoded `login:password` string | DD_REGISTRY_NEXUS_\{REGISTRY_NAME\}_LOGIN/PASSWORD must not be defined | | | `DD_REGISTRY_NEXUS_{REGISTRY_NAME}_CAFILE` | ⚪ | Path to custom CA certificate file | | | | `DD_REGISTRY_NEXUS_{REGISTRY_NAME}_INSECURE` | ⚪ | Allow insecure (non-TLS) connections | `true`, `false` | `false` | +| `DD_REGISTRY_NEXUS_{REGISTRY_NAME}_CLIENTCERT` | ⚪ | Path to client certificate file for mTLS | | | +| `DD_REGISTRY_NEXUS_{REGISTRY_NAME}_CLIENTKEY` | ⚪ | Path to client key file for mTLS | DD_REGISTRY_NEXUS_\{REGISTRY_NAME\}_CLIENTCERT must be defined | | ## Examples diff --git a/content/docs/current/configuration/server/index.mdx b/content/docs/current/configuration/server/index.mdx index 39fd7731..73d22aa2 100644 --- a/content/docs/current/configuration/server/index.mdx +++ b/content/docs/current/configuration/server/index.mdx @@ -44,6 +44,10 @@ For production deployments, set an explicit trusted origin: - `DD_SERVER_CORS_ORIGIN=https://drydock.example.com` - `DD_SERVER_CORS_ORIGIN=https://ops.example.com` +## Container Healthcheck + +The official Docker image includes a built-in `HEALTHCHECK` that polls the `/health` endpoint. When `DD_SERVER_TLS_ENABLED=true`, the healthcheck automatically switches to HTTPS (with `--insecure` for self-signed certificates). No additional configuration is needed. 
+ ## Plain HTTP Deployments When `DD_SERVER_TLS_ENABLED` is not set or is `false`, drydock automatically adjusts its security headers for plain HTTP: diff --git a/content/docs/current/configuration/triggers/docker-compose/index.mdx b/content/docs/current/configuration/triggers/docker-compose/index.mdx index 34e65e11..2db0598e 100644 --- a/content/docs/current/configuration/triggers/docker-compose/index.mdx +++ b/content/docs/current/configuration/triggers/docker-compose/index.mdx @@ -26,7 +26,7 @@ The trigger will: | Env var | Required | Description | Supported values | Default value when missing | | --- | :---: | --- | --- | --- | -| `DD_TRIGGER_DOCKERCOMPOSE_{trigger_name}_FILE` | ⚪ | The docker-compose.yml file location (can also be set per container via the `dd.compose.file` label) | | | +| `DD_TRIGGER_DOCKERCOMPOSE_{trigger_name}_FILE` | ⚪ | The docker-compose.yml file location or directory (can also be set per container via the `dd.compose.file` label). When a directory is given, Drydock probes for `compose.yaml`, `compose.yml`, `docker-compose.yaml`, and `docker-compose.yml` in order. | | | | `DD_TRIGGER_DOCKERCOMPOSE_{trigger_name}_BACKUP` | ⚪ | Backup the docker-compose.yml file as `.back` before updating? | `true`, `false` | `false` | | `DD_TRIGGER_DOCKERCOMPOSE_{trigger_name}_PRUNE` | ⚪ | If the old image must be pruned after upgrade | `true`, `false` | `false` | | `DD_TRIGGER_DOCKERCOMPOSE_{trigger_name}_DRYRUN` | ⚪ | When enabled, only pull the new image ahead of time | `true`, `false` | `false` | @@ -39,6 +39,8 @@ The trigger will: This trigger also supports the [common configuration variables](/docs/configuration/triggers/#common-trigger-configuration), but only supports the `batch` mode. +The env var keys for `COMPOSEFILEONCE`, `COMPOSEFILELABEL`, `RECONCILIATIONMODE`, and `DIGESTPINNING` are case-insensitive — both the lowercased form (e.g. `composefileonce`) and the camelCase form (e.g. `composeFileOnce`) are accepted.
+ Legacy compatibility: compose file label fallback `wud.compose.file` is still accepted when `dd.compose.file` is not present. Prefer `dd.compose.file` for new configs, and use `node dist/index.js config migrate` to rewrite existing labels. ## Auto-detection diff --git a/content/docs/current/configuration/triggers/mqtt/index.mdx b/content/docs/current/configuration/triggers/mqtt/index.mdx index af37f6c3..0d7b663c 100644 --- a/content/docs/current/configuration/triggers/mqtt/index.mdx +++ b/content/docs/current/configuration/triggers/mqtt/index.mdx @@ -22,7 +22,7 @@ The `mqtt` trigger lets you send container update notifications to an MQTT broke | `DD_TRIGGER_MQTT_{trigger_name}_HASS_ENABLED` | ⚪ | Enable [Home-assistant](https://www.home-assistant.io/) integration and deliver additional topics | `true`, `false` | `false` | | `DD_TRIGGER_MQTT_{trigger_name}_HASS_DISCOVERY` | ⚪ | Enable [Home-assistant](https://www.home-assistant.io/) integration including discovery | `true`, `false` | `false` | | `DD_TRIGGER_MQTT_{trigger_name}_HASS_PREFIX` | ⚪ | Base topic for hass entity discovery | | `homeassistant` | -| `DD_TRIGGER_MQTT_{trigger_name}_HASS_ATTRIBUTES` | ⚪ | Attribute preset controlling which container fields are included in MQTT payloads. `full` sends everything; `short` excludes large fields like SBOM documents, scan vulnerabilities, details, and labels. | `full`, `short` | `full` | +| `DD_TRIGGER_MQTT_{trigger_name}_HASS_ATTRIBUTES` | ⚪ | Attribute preset controlling which container fields are included in MQTT payloads. `full` sends everything; `short` excludes large fields like SBOM documents, scan vulnerabilities, details, and labels. | `full`, `short` | `short` | | `DD_TRIGGER_MQTT_{trigger_name}_HASS_FILTER_INCLUDE` | ⚪ | Comma-separated list of flattened MQTT attribute keys to keep (include-mode). When set, only these keys are published. 
| Comma-separated flattened keys | | | `DD_TRIGGER_MQTT_{trigger_name}_HASS_FILTER_EXCLUDE` | ⚪ | Comma-separated list of flattened MQTT attribute keys to remove (exclude-mode). Used when `HASS_FILTER_INCLUDE` is empty. | Comma-separated flattened keys | | | `DD_TRIGGER_MQTT_{trigger_name}_EXCLUDE` | ⚪ | Legacy comma-separated dot-paths to exclude from the nested container object before flattening. Used only when both `HASS_FILTER_INCLUDE` and `HASS_FILTER_EXCLUDE` are empty. | Comma-separated dot-paths | | @@ -36,6 +36,8 @@ The `mqtt` trigger lets you send container update notifications to an MQTT broke You want to customize the name & icon of the Home-Assistant entity? [Use the `dd.display.name` and `dd.display.icon` labels](/docs/configuration/watchers/#labels). +For Home Assistant, the `entity_picture` is automatically derived from the `dd.display.icon` label. Icons with `sh:`, `hl:`, or `si:` prefixes map to CDN image URLs. You can also set `dd.display.picture` to provide a direct URL override. 
+ ## Examples ### Post a message to a local mosquitto broker @@ -133,22 +135,23 @@ docker run \ ```json { - "id":"31a61a8305ef1fc9a71fa4f20a68d7ec88b28e32303bbc4a5f192e851165b816", - "name":"homeassistant", - "watcher":"local", - "include_tags":"^\\d+\\.\\d+.\\d+$", - "image_id":"sha256:d4a6fafb7d4da37495e5c9be3242590be24a87d7edcc4f79761098889c54fca6", - "image_registry_url":"123456789.dkr.ecr.eu-west-1.amazonaws.com", - "image_name":"test", - "image_tag_value":"2021.6.4", - "image_tag_semver":true, - "image_digest_watch":false, - "image_digest_repo":"sha256:ca0edc3fb0b4647963629bdfccbb3ccfa352184b45a9b4145832000c2878dd72", - "image_architecture":"amd64", - "image_os":"linux", - "image_created":"2021-06-12T05:33:38.440Z", - "result_tag":"2021.6.5", - "updateAvailable":"2021.6.5" + "id": "31a61a8305ef1fc9a71fa4f20a68d7ec88b28e32303bbc4a5f192e851165b816", + "name": "homeassistant", + "watcher": "local", + "image_id": "sha256:d4a6fafb7d4da37495e5c9be3242590be24a87d7edcc4f79761098889c54fca6", + "image_registry_url": "123456789.dkr.ecr.eu-west-1.amazonaws.com", + "image_name": "test", + "image_tag_value": "2021.6.4", + "image_tag_semver": true, + "image_digest_watch": false, + "image_digest_repo": "sha256:ca0edc3fb0b4647963629bdfccbb3ccfa352184b45a9b4145832000c2878dd72", + "image_architecture": "amd64", + "image_os": "linux", + "image_created": "2021-06-12T05:33:38.440Z", + "display_name": "Home Assistant", + "display_icon": "sh:homeassistant", + "result_tag": "2021.6.5", + "update_available": true } ``` @@ -252,7 +255,7 @@ When no explicit include/exclude list is configured, `HASS_ATTRIBUTES` is used: | Preset | Behavior | | --- | --- | -| `full` (default) | Sends the entire container object — no filtering | -| `short` | Excludes `security.sbom.documents`, `security.updateSbom.documents`, `security.scan.vulnerabilities`, `security.updateScan.vulnerabilities`, `details`, and `labels` | +| `full` | Sends the entire container object — no filtering | +| `short` (default) | 
Excludes `security.sbom.documents`, `security.updateSbom.documents`, `security.scan.vulnerabilities`, `security.updateScan.vulnerabilities`, `details`, and `labels` | `HASS_FILTER_INCLUDE` and `HASS_FILTER_EXCLUDE` match flattened MQTT keys (snake_case + underscore delimiter), because filtering is applied after flattening to the payload shape Home Assistant receives. diff --git a/content/docs/current/configuration/watchers/index.mdx b/content/docs/current/configuration/watchers/index.mdx index 7a4b286b..e658b15f 100644 --- a/content/docs/current/configuration/watchers/index.mdx +++ b/content/docs/current/configuration/watchers/index.mdx @@ -423,6 +423,7 @@ To fine-tune the behaviour of drydock _per container_, you can add labels on the | Label | Required | Description | Supported values | Default value when missing | | --- | :---: | --- | --- | --- | | `dd.display.icon` | ⚪ | Custom display icon for the container | Valid [Fontawesome Icon](https://fontawesome.com/), [Homarr Labs Icon](https://dashboardicons.com/), [Selfh.st Icon](https://selfh.st/icons/), or [Simple Icon](https://simpleicons.org/) (see details below). `mdi:` icons are auto-resolved but not recommended. | `fab fa-docker` | +| `dd.display.picture` | ⚪ | Custom entity picture URL for Home Assistant MQTT integration. When set to an HTTP/HTTPS URL, overrides the icon-derived `entity_picture` in HASS discovery payloads. 
| Valid HTTP or HTTPS URL | | | `dd.display.name` | ⚪ | Custom display name for the container | Valid String | Container name | | `dd.group` | ⚪ | Group name for stack/group views in the UI (falls back to `com.docker.compose.project` if not set) | Valid String | | | `dd.inspect.tag.path` | ⚪ | Docker inspect path used to derive a local semver tag | Slash-separated path in `docker inspect` output | | diff --git a/lefthook.yml b/lefthook.yml index 57ac61c4..1ca298fc 100644 --- a/lefthook.yml +++ b/lefthook.yml @@ -23,56 +23,58 @@ pre-commit: glob: '*.{ts,js,json,vue,css}' run: npx biome format --write --no-errors-on-unmatched {staged_files} && git add {staged_files} priority: 2 + coverage: + glob: '*.{ts,vue}' + run: ./scripts/pre-commit-coverage.sh + priority: 3 + timeout: 2m pre-push: piped: true commands: - # ── Clean tree gate: fail if working tree is dirty ───────────────── - # Catches untracked files (which hang qlty on interactive prompts), - # uncommitted changes (which pass locally but fail CI), and stashed - # changes (which hide work that should be committed or discarded). + # ── Clean tree gate: block push if uncommitted changes exist ─────── + # CI only sees committed state. Hard-fail so you don't push code + # that hasn't been tested in its committed form. clean-tree: run: | dirty=$(git status --porcelain 2>/dev/null) - stash=$(git stash list 2>/dev/null) - fail=0 if [ -n "$dirty" ]; then - echo "❌ Working tree is not clean:" - echo "$dirty" + echo "❌ Working tree has uncommitted changes (CI won't see these):" echo "" - echo "Commit, stash, or remove these files before pushing." - fail=1 - fi - if [ -n "$stash" ]; then - echo "⚠️ Stash is not empty:" - echo "$stash" + echo "$dirty" echo "" - echo "Apply or drop stashed changes — they won't be in CI." - fail=1 + echo "What do we want to do with these files?" 
+ exit 1 fi - exit $fail + fail_text: "Uncommitted changes detected — decide what to do with them before pushing" priority: 0 + timeout: 10s # ── Lint gate: fast checks that catch formatting/lint issues ────── ts-nocheck: run: node scripts/check-ts-nocheck-allowlist.mjs priority: 1 + timeout: 15s biome: run: npx biome check . priority: 2 + timeout: 30s qlty: - run: CI=1 qlty check --all --no-progress + run: CI=1 qlty check --all --no-progress > /dev/null 2>&1 priority: 6 + timeout: 30s diff --git a/scripts/pre-commit-coverage.sh b/scripts/pre-commit-coverage.sh new file mode 100755 index 00000000..cec1c842 --- /dev/null +++ b/scripts/pre-commit-coverage.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +# Pre-commit coverage gate: runs tests related to staged files and checks +# that each staged source file maintains coverage thresholds. +# +# Only activates when .ts/.vue source files in app/ or ui/ are staged. +# Uses vitest --changed to find affected tests, and --coverage.include +# to scope coverage measurement to only the staged files. +# +# Thresholds: 100% lines/functions/statements, 95% branches. +# Branch threshold is slightly relaxed because v8 coverage reports +# phantom uncovered branches on ternaries and exhaustive if-chains. +# The pre-push hook enforces full 100% globally via `npm test`.
+set -euo pipefail + +cd "$(git rev-parse --show-toplevel)" + +# Collect staged .ts/.vue source files (excludes deletions and test files) +staged_app=() +staged_ui=() + +while IFS= read -r file; do + case "$file" in + app/*.test.ts) ;; # skip test files — we measure source coverage + app/*.ts) staged_app+=("$file") ;; + ui/src/*.spec.ts) ;; # skip test files + ui/src/*.ts | ui/src/*.vue) staged_ui+=("$file") ;; + esac +done < <(git diff --cached --name-only --diff-filter=d) + +# Skip if no relevant source files staged +if [[ ${#staged_app[@]} -eq 0 && ${#staged_ui[@]} -eq 0 ]]; then + echo "⏭ No app/ui source files staged — skipping coverage check" + exit 0 +fi + +pids=() +labels=() +fail=0 + +run() { + local label=$1 + shift + "$@" & + pids+=($!) + labels+=("$label") +} + +# Common coverage flags: scope to staged files, per-file thresholds +COVERAGE_FLAGS="--coverage --coverage.thresholds.perFile --coverage.thresholds.branches=95" + +if [[ ${#staged_app[@]} -gt 0 ]]; then + # Build --coverage.include patterns for each staged file (paths relative to app/) + include_args=() + for f in "${staged_app[@]}"; do + include_args+=(--coverage.include "${f#app/}") + done + echo "🧪 Running coverage for ${#staged_app[@]} staged app file(s)..." + # shellcheck disable=SC2086 + run "app-coverage" bash -c "cd app && npx vitest run --changed $COVERAGE_FLAGS ${include_args[*]}" +fi + +if [[ ${#staged_ui[@]} -gt 0 ]]; then + # Build --coverage.include patterns for each staged file (paths relative to ui/) + include_args=() + for f in "${staged_ui[@]}"; do + include_args+=(--coverage.include "${f#ui/}") + done + echo "🧪 Running coverage for ${#staged_ui[@]} staged ui file(s)..." + # shellcheck disable=SC2086 + run "ui-coverage" bash -c "cd ui && npx vitest run --changed $COVERAGE_FLAGS ${include_args[*]}" +fi + +for i in "${!pids[@]}"; do + if ! 
wait "${pids[$i]}"; then + echo "❌ FAILED: ${labels[$i]} — coverage threshold not met" >&2 + fail=1 + fi +done + +if [[ $fail -eq 0 ]]; then + echo "✅ Coverage check passed" +fi + +exit $fail diff --git a/test/qa-proxy-certs/cert.pem b/test/qa-proxy-certs/cert.pem new file mode 100644 index 00000000..ca624588 --- /dev/null +++ b/test/qa-proxy-certs/cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIUIxsKgAJTBH7amTLrNF+H7knrdBMwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI2MDMxMTE2MzQxMFoXDTI2MDQx +MDE2MzQxMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAyHQlopOX6rEBQwx77NSD6FhpzFojscgQYK9IYAY+QLGI +DzSGeC7FeO+WhvqBhMnEhen6pjXITSqWNwo3OYBXp4GpinnQXSdQ5yYh9k5Zg1fZ +jWPwfERRmrjSenbrxt4TycKxDuS288EMUHt0g3Ur3NNood5/oX8aNSPAPyE3nuYl +4Kvhs6sfpMAs2VBfOqaBo4TgKbXIY4TKAQiEHHXOSX6bMmz2P2aqhrK0T5LGIrJp +x4rsiDabxyGBFHcD7GsSSqYLSVSYdBU9XuS2YEco0U3LiV5kuih5u2ZYh2SrL0+I +LboimfhJ9XZLk4hWKqdTI3fEVgF5J4ckPQaBMA7tlwIDAQABo1MwUTAdBgNVHQ4E +FgQUsYmvKjV1p1uiTNnkEApxA14wB3gwHwYDVR0jBBgwFoAUsYmvKjV1p1uiTNnk +EApxA14wB3gwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAFrKq +DXtD5cfv3hyru3aaSL/7FZSVjwMT+xXWis2+h8dnOsiIZOn8p+lxZvfeHvOSurGn +7E3i6L+xbZqnFSeUfc5j14HlICkjIavKjB9J2884v5SmPrRRRSdnMOZNdnMbwbjo +N/N2XxvNgyBN4KDB8yWqtP5+LWuRrl8JfpjQRbyi2BY4HVI4qpjHeHF1qomaMHsb +p5vha07jtHR4u6gmXaie3fhcMGbocpjEEHooWNILhjVkFL9evjvC3KTpFt/fQxPt +2EnXAPlLI7RRRhPf/Q0mR6mhqGlvwZ5r7d2VOM8r422BF2b0bvjCB4nMOTjArF6s +mCFNiwLp0HQWlCtIdg== +-----END CERTIFICATE----- diff --git a/test/qa-proxy-certs/key.pem b/test/qa-proxy-certs/key.pem new file mode 100644 index 00000000..5fd615aa --- /dev/null +++ b/test/qa-proxy-certs/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDIdCWik5fqsQFD +DHvs1IPoWGnMWiOxyBBgr0hgBj5AsYgPNIZ4LsV475aG+oGEycSF6fqmNchNKpY3 +Cjc5gFengamKedBdJ1DnJiH2TlmDV9mNY/B8RFGauNJ6duvG3hPJwrEO5LbzwQxQ +e3SDdSvc02ih3n+hfxo1I8A/ITee5iXgq+Gzqx+kwCzZUF86poGjhOAptchjhMoB 
+CIQcdc5JfpsybPY/ZqqGsrRPksYismnHiuyINpvHIYEUdwPsaxJKpgtJVJh0FT1e +5LZgRyjRTcuJXmS6KHm7ZliHZKsvT4gtuiKZ+En1dkuTiFYqp1Mjd8RWAXknhyQ9 +BoEwDu2XAgMBAAECggEAIIYj4xySfhxpKYFZjCQxIN5TkDoh54MgESfvlND+gc2s +V1c86Dwig0xQfqcfo7V/IwFDT3uTe93IkLaiHjzSPBYANb0tDyhFarPXt9ifkn2L +CXNpPihxmyqY0BECkXpP+KETzGhUvPkaL+bQEVJOIx7UL8tjpwbx8CRUWYngtfG0 +z+I46Vg9BLwivGTLpgyuRMq48Gh9Dcw+kmIacfSYY8TbauLc3Ls6EoDg7BAzOkB4 +9hNLgBRRT+95zmpHv1++j17AIohr3apvxDVar9Ub+/jpfMUatUwG8oH+Ef8QmRNG +gKfEf3wUPKA3lF3FZKKe/HsKGYDuQAhfRReSKeI+DQKBgQD73mANjWpjyuI2lAtt +ZsW7PTCh9pHB5hFVnNw2jtr3wvAprh+F8yr+X00+7IlTfM2VcrV9g3LFFLUDFIEE +AzAKCz1fTAEKn9WlTx/yW6V16mj7gwhGhWayjRikbNsTiEHK7wdNmsX09j7kFmTw +X8xriPJcbnObP/XqL1aYL/l+ewKBgQDLvd/n8wrC4az1VLmAhedW+2BxgAmdu8L3 +CuAMylUrSM99PEOGn/zFBIZrN3VeUWX5TyhuDwNjCQayp3jCIZohJn73DFLbuKCU +yK9ICkUiquCvSmTQ88Xy+/bmVgwZlMituvdq56ldZOXPQvDey34kL37nWxOe4KF7 +Z9Cxpz/wlQJ/bTT+cOHgP2S4sPGAAu+MzKK7c351tJ0M5xIcaYuHZeYgO/JDuNC2 +05R+6cmlwY1blnEmQEW+fbV8xtkdF6BKNBCri6ZozTKAcCzerTcPhxEcc/FcpTcy +UDjddm28j7uEy3jYsc1qB8y9eCg9m/vtprK6Y1mAxs/00JW19kbW5wKBgC6B+UqJ +QDucKE8YOAAOkPBaEXnXMFrBMZAS+3Hv/eETjcmYqBFjE+AlWEnBLxmImy5900zM +QImq1cySTg6CfRx0HSdnuMJPMtjDtr9LGN0BBKj+4mSQQO9mdMW/fqOYQvblZvUQ +TKj1D0Bwl2tEKFc6QE2vjJsHy9TPrePfucjBAoGBAOnzNd++P/x6/xIPHtxbs72q +EelJZduYhIYcLmbMzuu+MlYQUyrQ++ctbLC/nCpQa8aDG90eDeChoLDFs1xv+WKj ++d4SXcGwPEA2Qc2LYyDtNt9G3+inBb1WMM2DcdFF92umGnw8LI7Wijl6toAMc1ZC +IDZ131VHqUzkRkw52K86 +-----END PRIVATE KEY----- diff --git a/test/qa-proxy-compose.yml b/test/qa-proxy-compose.yml new file mode 100644 index 00000000..1e9b6b29 --- /dev/null +++ b/test/qa-proxy-compose.yml @@ -0,0 +1,47 @@ +services: + # Nginx reverse proxy — TLS-terminating proxy in front of drydock + # Simulates the common self-hosted setup (NPM, Traefik, Caddy, etc.) 
+ proxy: + image: nginx:alpine + container_name: qa-proxy + ports: + - "8443:443" + volumes: + - ./qa-proxy-nginx.conf:/etc/nginx/conf.d/default.conf:ro + - ./qa-proxy-certs:/etc/nginx/certs:ro + depends_on: + - drydock + + drydock: + image: drydock:dev + container_name: drydock-qa-proxy + user: root + ports: + - "3333:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./qa-proxy-compose.yml:/drydock/qa-compose.yml:rw + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/2 * * * * + - DD_TRIGGER_DOCKER_LOCAL_AUTO=false + - DD_TRIGGER_DOCKERCOMPOSE_QA_FILE=/drydock/qa-compose.yml + - DD_TRIGGER_DOCKERCOMPOSE_QA_AUTO=false + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH=argon2id$$65536$$3$$4$$zUAK9+ktVWNHyQdv3SaOSgMv3T02F1Zj8D+t1un7D98=$$KEmn6d94w03YxIlw7U7l/ikD9lW+H3IC1N7xbAhOB9yKofA3HoxUBmuvBawvShhv337zDv4+g9hobNWeQEWwkQ==" + - DD_SESSION_SECRET=qa-test-session-secret + # Test scenario: trustproxy=1 (required for reverse proxy) + - DD_SERVER_TRUSTPROXY=1 + - DD_PUBLIC_URL=https://localhost:8443 + + # Test container with compose trigger label + nginx-test: + image: nginx:1.25.5 + pull_policy: never + container_name: nginx-compose-test + labels: + - dd.watch=true + - dd.display.name=Nginx Compose Test diff --git a/test/qa-proxy-nginx.conf b/test/qa-proxy-nginx.conf new file mode 100644 index 00000000..e1e811d6 --- /dev/null +++ b/test/qa-proxy-nginx.conf @@ -0,0 +1,16 @@ +server { + listen 443 ssl; + server_name localhost; + + ssl_certificate /etc/nginx/certs/cert.pem; + ssl_certificate_key /etc/nginx/certs/key.pem; + + location / { + proxy_pass http://drydock:3000; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $http_host; + } +} diff --git 
a/test/qa-rc12-fixes.yml b/test/qa-rc12-fixes.yml new file mode 100644 index 00000000..714d5ae8 --- /dev/null +++ b/test/qa-rc12-fixes.yml @@ -0,0 +1,204 @@ +# QA E2E — rc.12 tracker fixes +# Verifies all 14 rc.12 fixes end-to-end against running drydock instances. +# +# Usage: +# docker build -t drydock:dev . +# docker compose -f test/qa-rc12-fixes.yml up -d +# docker compose -f test/qa-rc12-fixes.yml ps # wait for all healthy +# # run Playwright MCP scenarios +# docker compose -f test/qa-rc12-fixes.yml down -v +# +# Scenarios covered: +# 1. Compose directory resolution (208245ec) +# 2. docker.io prefix preservation (16598ba0) +# 3. Compose config case-sensitivity (14b468fe) +# 4a. Legacy SHA-1 hash login (8ba1a6f2) +# 4b. Legacy APR1 hash login (8ba1a6f2) +# 4c. Legacy DES crypt hash login (8ba1a6f2) +# 4d. Legacy plaintext hash login (8ba1a6f2) +# 5. TLS healthcheck (4e6c5d96) +# 6. Log level propagation (b0e48186) +# 7. Feature flags after login (82aa55d2) +# 8. Container-update audit (632e478c) +# 9. MQTT HASS_ATTRIBUTES (8401c205) +# 10. Trigger warn logging (98547dd3) +# 13. 
Icon cache headers (2b342a5e) + +services: + # ── Main drydock instance ─────────────────────────────── + # Covers: Scenario 1, 2, 3, 6, 7, 8, 9, 10, 13 + drydock-rc12: + image: drydock:dev + container_name: drydock-rc12 + user: root + ports: + - "3400:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./qa-rc12-stacks:/drydock/stacks:rw + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + # Scenario 6: log level propagation + - DD_LOG_LEVEL=debug + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/2 * * * * + - DD_SESSION_SECRET=qa-rc12-session-secret + - DD_PUBLIC_URL=http://localhost:3400 + # Auth (argon2id — normal login for main instance) + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH=argon2id$$65536$$3$$4$$zUAK9+ktVWNHyQdv3SaOSgMv3T02F1Zj8D+t1un7D98=$$KEmn6d94w03YxIlw7U7l/ikD9lW+H3IC1N7xbAhOB9yKofA3HoxUBmuvBawvShhv337zDv4+g9hobNWeQEWwkQ==" + # Scenario 1: compose directory resolution — FILE points to directory + - DD_TRIGGER_DOCKERCOMPOSE_DOCKGE_FILE=/drydock/stacks/filebrowser + # Scenario 3: lowercase env var keys for compose config + - DD_TRIGGER_DOCKERCOMPOSE_DOCKGE_COMPOSEFILEONCE=true + - DD_TRIGGER_DOCKERCOMPOSE_DOCKGE_DIGESTPINNING=false + - DD_TRIGGER_DOCKERCOMPOSE_DOCKGE_RECONCILIATIONMODE=warn + - DD_TRIGGER_DOCKERCOMPOSE_DOCKGE_AUTO=false + # Scenario 9: MQTT HASS attributes + - DD_TRIGGER_MQTT_QA_URL=mqtt://mosquitto:1883 + - DD_TRIGGER_MQTT_QA_HASS_ENABLED=true + - DD_TRIGGER_MQTT_QA_HASS_PREFIX=homeassistant + - DD_TRIGGER_MQTT_QA_HASS_DISCOVERY=true + - DD_TRIGGER_MQTT_QA_HASS_ATTRIBUTES=short + # Docker trigger (manual only) + - DD_TRIGGER_DOCKER_LOCAL_AUTO=false + depends_on: + mosquitto: + condition: service_healthy + + # ── Scenario 4a: Legacy SHA-1 hash login ──────────────── + # Password: myPassword + # Hash: {SHA}VBPuJHI7uixaa6LQGWx4s+5GKNE= + drydock-sha1: + image: drydock:dev + container_name: drydock-sha1 + user: root + ports: + - 
"3401:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/5 * * * * + - DD_SESSION_SECRET=qa-sha1-session-secret + - DD_PUBLIC_URL=http://localhost:3401 + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH={SHA}VBPuJHI7uixaa6LQGWx4s+5GKNE=" + + # ── Scenario 4b: Legacy APR1 hash login ───────────────── + # Password: myPassword + # Hash: $apr1$r31.....$HqJZimcKQFAMYayBlzkrA/ + drydock-apr1: + image: drydock:dev + container_name: drydock-apr1 + user: root + ports: + - "3402:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/5 * * * * + - DD_SESSION_SECRET=qa-apr1-session-secret + - DD_PUBLIC_URL=http://localhost:3402 + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH=$$apr1$$r31.....$$HqJZimcKQFAMYayBlzkrA/" + + # ── Scenario 4c: Legacy DES crypt hash login ──────────── + # Password: myPassword + # Hash: rqXexS6ZhobKA + drydock-crypt: + image: drydock:dev + container_name: drydock-crypt + user: root + ports: + - "3403:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/5 * * * * + - DD_SESSION_SECRET=qa-crypt-session-secret + - DD_PUBLIC_URL=http://localhost:3403 + - DD_AUTH_BASIC_ADMIN_USER=admin + - DD_AUTH_BASIC_ADMIN_HASH=rqXexS6ZhobKA + + # ── Scenario 4d: Legacy plaintext hash login ──────────── + # Password: plaintext-password (the hash IS the password) + drydock-plain: + image: drydock:dev + container_name: drydock-plain + user: root + ports: + - "3404:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + 
environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/5 * * * * + - DD_SESSION_SECRET=qa-plain-session-secret + - DD_PUBLIC_URL=http://localhost:3404 + - DD_AUTH_BASIC_ADMIN_USER=admin + - DD_AUTH_BASIC_ADMIN_HASH=plaintext-password + + # ── Scenario 5: TLS backend + healthcheck ─────────────── + drydock-tls: + image: drydock:dev + container_name: drydock-tls + user: root + ports: + - "3405:3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./qa-proxy-certs:/certs:ro + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/5 * * * * + - DD_SESSION_SECRET=qa-tls-session-secret + - DD_PUBLIC_URL=https://localhost:3405 + - DD_SERVER_TLS_ENABLED=true + - DD_SERVER_TLS_CERT=/certs/cert.pem + - DD_SERVER_TLS_KEY=/certs/key.pem + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH=argon2id$$65536$$3$$4$$zUAK9+ktVWNHyQdv3SaOSgMv3T02F1Zj8D+t1un7D98=$$KEmn6d94w03YxIlw7U7l/ikD9lW+H3IC1N7xbAhOB9yKofA3HoxUBmuvBawvShhv337zDv4+g9hobNWeQEWwkQ==" + + # ── MQTT broker (Mosquitto) — Scenario 9 ──────────────── + mosquitto: + image: eclipse-mosquitto:2 + container_name: mosquitto + ports: + - "1883:1883" + volumes: + - ./mosquitto.conf:/mosquitto/config/mosquitto.conf:ro + healthcheck: + test: ["CMD-SHELL", "mosquitto_sub -t '$$SYS/#' -C 1 -W 3 || exit 1"] + interval: 5s + timeout: 5s + retries: 5 + start_period: 5s + + # ── Watched container — Scenario 8 (audit), 10 (warn logging) ── + qa-nginx: + image: nginx:1.25.5 + pull_policy: never + container_name: qa-nginx + labels: + - dd.watch=true + - dd.display.name=QA Nginx diff --git a/test/qa-rc12-stacks/filebrowser/compose.yaml b/test/qa-rc12-stacks/filebrowser/compose.yaml new file mode 100644 index 00000000..5afccb2f --- /dev/null +++ b/test/qa-rc12-stacks/filebrowser/compose.yaml @@ -0,0 +1,7 
@@ +services: + filebrowser: + image: docker.io/filebrowser/filebrowser:v2.30.0 + container_name: qa-filebrowser + labels: + - dd.watch=true + - dd.display.name=Filebrowser diff --git a/test/qa-traefik-compose.yml b/test/qa-traefik-compose.yml new file mode 100644 index 00000000..7acfc9dd --- /dev/null +++ b/test/qa-traefik-compose.yml @@ -0,0 +1,61 @@ +services: + # Traefik reverse proxy — TLS-terminating, matching rj10rd/Pangolin stack + traefik: + image: traefik:latest + container_name: qa-traefik + command: + - --api.insecure=true + - --providers.docker=true + - --providers.docker.exposedByDefault=false + - --providers.file.filename=/etc/traefik/dynamic.yml + - --entrypoints.websecure.address=:443 + ports: + - "8443:443" + - "8080:8080" # Traefik dashboard + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - ./qa-proxy-certs:/certs:ro + - ./qa-traefik-dynamic.yml:/etc/traefik/dynamic.yml:ro + + drydock: + image: drydock:dev + container_name: drydock-qa-traefik + user: root + expose: + - "3000" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - ./qa-traefik-compose.yml:/drydock/qa-compose.yml:rw + environment: + - DD_RUN_AS_ROOT=true + - DD_ALLOW_INSECURE_ROOT=true + - DD_LOG_FORMAT=text + - DD_LOG_LEVEL=debug + - DD_WATCHER_LOCAL_WATCHBYDEFAULT=false + - DD_WATCHER_LOCAL_CRON=*/2 * * * * + # Docker trigger (manual only) + - DD_TRIGGER_DOCKER_LOCAL_AUTO=false + # Compose trigger — file must be writable inside container + - DD_TRIGGER_DOCKERCOMPOSE_QA_FILE=/drydock/qa-compose.yml + - DD_TRIGGER_DOCKERCOMPOSE_QA_AUTO=false + - DD_AUTH_BASIC_ADMIN_USER=admin + - "DD_AUTH_BASIC_ADMIN_HASH=argon2id$$65536$$3$$4$$zUAK9+ktVWNHyQdv3SaOSgMv3T02F1Zj8D+t1un7D98=$$KEmn6d94w03YxIlw7U7l/ikD9lW+H3IC1N7xbAhOB9yKofA3HoxUBmuvBawvShhv337zDv4+g9hobNWeQEWwkQ==" + - DD_SESSION_SECRET=qa-test-session-secret + # Required for reverse proxy + - DD_SERVER_TRUSTPROXY=1 + - DD_PUBLIC_URL=https://localhost:8443 + labels: + - traefik.enable=true + - 
traefik.http.routers.drydock.rule=Host(`localhost`) + - traefik.http.routers.drydock.entrypoints=websecure + - traefik.http.routers.drydock.tls=true + - traefik.http.services.drydock.loadbalancer.server.port=3000 + + # Test container with compose trigger + watch labels + nginx-test: + image: nginx:1.25.5 + pull_policy: never + container_name: nginx-compose-test + labels: + - dd.watch=true + - dd.display.name=Nginx Compose Test diff --git a/test/qa-traefik-dynamic.yml b/test/qa-traefik-dynamic.yml new file mode 100644 index 00000000..c46de37e --- /dev/null +++ b/test/qa-traefik-dynamic.yml @@ -0,0 +1,9 @@ +tls: + certificates: + - certFile: /certs/cert.pem + keyFile: /certs/key.pem + stores: + default: + defaultCertificate: + certFile: /certs/cert.pem + keyFile: /certs/key.pem diff --git a/ui/src/boot/icon-bundle.json b/ui/src/boot/icon-bundle.json index 84da0d54..d951cf07 100644 --- a/ui/src/boot/icon-bundle.json +++ b/ui/src/boot/icon-bundle.json @@ -339,6 +339,11 @@ "width": 512, "height": 512 }, + "fa6-solid:expand": { + "body": "", + "width": 448, + "height": 512 + }, "fa6-solid:book": { "body": "", "width": 448, @@ -1059,6 +1064,16 @@ "width": 256, "height": 256 }, + "ph:frame-corners": { + "body": "", + "width": 256, + "height": 256 + }, + "ph:frame-corners-duotone": { + "body": "", + "width": 256, + "height": 256 + }, "ph:book-open": { "body": "", "width": 256, @@ -1434,6 +1449,11 @@ "width": 24, "height": 24 }, + "lucide:scan": { + "body": "", + "width": 24, + "height": 24 + }, "lucide:book-open": { "body": "", "width": 24, @@ -1799,6 +1819,11 @@ "width": 24, "height": 24 }, + "tabler:scan": { + "body": "", + "width": 24, + "height": 24 + }, "tabler:book": { "body": "", "width": 24, @@ -2134,6 +2159,11 @@ "width": 24, "height": 24 }, + "heroicons:viewfinder-circle": { + "body": "", + "width": 24, + "height": 24 + }, "heroicons:book-open": { "body": "", "width": 24, @@ -2464,6 +2494,11 @@ "width": 24, "height": 24 }, + "iconoir:frame-select": { + "body": "", 
+ "width": 24, + "height": 24 + }, "iconoir:book": { "body": "", "width": 24, diff --git a/ui/src/components/DataCardGrid.vue b/ui/src/components/DataCardGrid.vue index 927f1135..28a1efb6 100644 --- a/ui/src/components/DataCardGrid.vue +++ b/ui/src/components/DataCardGrid.vue @@ -46,7 +46,7 @@ function onCardKeydown(event: KeyboardEvent, item: Record) { backgroundColor: 'var(--dd-bg-card)', border: selectedKey != null && getKey(item, itemKey) === selectedKey ? '1.5px solid var(--color-drydock-secondary)' - : '1px solid var(--dd-border-strong)', + : 'none', borderRadius: 'var(--dd-radius)', overflow: 'hidden', }" diff --git a/ui/src/components/DataFilterBar.stories.ts b/ui/src/components/DataFilterBar.stories.ts index 43c225a1..9d19aa17 100644 --- a/ui/src/components/DataFilterBar.stories.ts +++ b/ui/src/components/DataFilterBar.stories.ts @@ -26,7 +26,7 @@ const renderWithFilters = (args: Story['args']) => ({ diff --git a/ui/src/components/DataFilterBar.vue b/ui/src/components/DataFilterBar.vue index c35ca435..d20afd72 100644 --- a/ui/src/components/DataFilterBar.vue +++ b/ui/src/components/DataFilterBar.vue @@ -34,20 +34,18 @@ function viewModeLabel(id: string): string {
{{ filteredCount }}/{{ totalCount }} -
+ aria-label="View mode">
diff --git a/ui/src/components/DataTable.vue b/ui/src/components/DataTable.vue index b34a95ce..7c7d275e 100644 --- a/ui/src/components/DataTable.vue +++ b/ui/src/components/DataTable.vue @@ -25,6 +25,8 @@ const props = withDefaults( virtualRowHeight?: number; virtualOverscan?: number; virtualMaxHeight?: string; + /** Optional max-height for scroll area when virtualScroll is false (e.g., '340px') */ + maxHeight?: string; }>(), { showActions: false, @@ -318,20 +320,20 @@ function handleHeaderKeydown(event: KeyboardEvent, col: DataTableColumn) {