diff --git a/.codacy.yml b/.codacy.yml index b00c8590..c338c01d 100644 --- a/.codacy.yml +++ b/.codacy.yml @@ -2,6 +2,14 @@ exclude_paths: - "docs/plans/**" - "scripts/**" - "apps/api/tests/**" + - "apps/web/src/*.test.ts" + - "apps/web/src/*.test.tsx" + - "apps/web/src/**/*.test.ts" + - "apps/web/src/**/*.test.tsx" - "apps/web/e2e/**" + - "apps/desktop/src/*.test.ts" + - "apps/desktop/src/**/*.test.ts" + - "services/worker/test_*.py" + - "apps/desktop/src/styles.css" - "apps/desktop/src/text.ts" - "apps/desktop/src/text.test.ts" diff --git a/.github/workflows/codecov-analytics.yml b/.github/workflows/codecov-analytics.yml index 0c91594a..6fe97f72 100644 --- a/.github/workflows/codecov-analytics.yml +++ b/.github/workflows/codecov-analytics.yml @@ -27,6 +27,11 @@ jobs: exit 1 fi + - name: Install system dependencies for desktop Rust coverage + run: | + sudo apt-get update + sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev librsvg2-dev patchelf + - name: Set up Python uses: actions/setup-python@v6 with: @@ -37,6 +42,9 @@ jobs: with: node-version: '20' + - name: Set up Rust + uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 + - name: Install Python deps run: | python -m venv .venv @@ -59,32 +67,59 @@ jobs: --cov=apps/api/app \ --cov=services/worker \ --cov=packages/media-core/src/media_core \ + --cov=scripts \ --cov-report=xml:coverage/python-coverage.xml \ apps/api/tests services/worker packages/media-core/tests - name: Run web coverage working-directory: apps/web - run: | - npx vitest run --coverage \ - --coverage.thresholds.lines=0 \ - --coverage.thresholds.functions=0 \ - --coverage.thresholds.branches=0 \ - --coverage.thresholds.statements=0 + run: npm run test:coverage - name: Run desktop TS coverage working-directory: apps/desktop + run: npm run test:coverage + + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov --locked + + - name: Run desktop Rust coverage run: | - npx vitest run --coverage \ - 
--coverage.thresholds.lines=0 \ - --coverage.thresholds.functions=0 \ - --coverage.thresholds.branches=0 \ - --coverage.thresholds.statements=0 + mkdir -p coverage + cd apps/desktop/src-tauri + cargo llvm-cov --workspace --all-features --lcov --output-path ../../../coverage/desktop-rust.lcov + + - name: Upload Python coverage to Codecov + uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: coverage/python-coverage.xml + flags: api,worker,media-core,scripts + fail_ci_if_error: true + verbose: true + + - name: Upload web coverage to Codecov + uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: apps/web/coverage/lcov.info + flags: web + fail_ci_if_error: true + verbose: true + + - name: Upload desktop TS coverage to Codecov + uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: apps/desktop/coverage/lcov.info + flags: desktop-ts + fail_ci_if_error: true + verbose: true - - name: Upload coverage to Codecov + - name: Upload desktop Rust coverage to Codecov uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de with: token: ${{ secrets.CODECOV_TOKEN }} - files: coverage/python-coverage.xml,apps/web/coverage/lcov.info,apps/desktop/coverage/lcov.info - flags: api,worker,media-core,web,desktop-ts + files: coverage/desktop-rust.lcov + flags: desktop-rust fail_ci_if_error: true verbose: true diff --git a/.github/workflows/coverage-100.yml b/.github/workflows/coverage-100.yml index e589a0bb..f0c78e72 100644 --- a/.github/workflows/coverage-100.yml +++ b/.github/workflows/coverage-100.yml @@ -59,6 +59,7 @@ jobs: --cov=apps/api/app \ --cov=services/worker \ --cov=packages/media-core/src/media_core \ + --cov=scripts \ --cov-report=xml:coverage/python-coverage.xml \ apps/api/tests services/worker packages/media-core/tests @@ -82,8 +83,11 @@ jobs: - 
name: Enforce 100% coverage run: | .venv/bin/python scripts/quality/assert_coverage_100.py \ + --xml "python=coverage/python-coverage.xml" \ --lcov "web=apps/web/coverage/lcov.info" \ --lcov "desktop-ts=apps/desktop/coverage/lcov.info" \ + --lcov "desktop-rust=coverage/desktop-rust.lcov" \ + --inventory-root . \ --out-json "coverage-100/coverage.json" \ --out-md "coverage-100/coverage.md" diff --git a/.github/workflows/desktop-release.yml b/.github/workflows/desktop-release.yml index dd5a0dd7..4f63dcbb 100644 --- a/.github/workflows/desktop-release.yml +++ b/.github/workflows/desktop-release.yml @@ -36,7 +36,9 @@ jobs: with: node-version: "20" cache: "npm" - cache-dependency-path: apps/desktop/package-lock.json + cache-dependency-path: | + apps/desktop/package-lock.json + apps/web/package-lock.json - name: Install Rust stable uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 @@ -50,6 +52,14 @@ jobs: sudo apt-get install -y libwebkit2gtk-4.1-dev librsvg2-dev patchelf sudo apt-get install -y libappindicator3-dev || sudo apt-get install -y libayatana-appindicator3-dev + - name: Install web dependencies + working-directory: apps/web + run: npm ci + + - name: Build hosted web dist for desktop runtime bundle + working-directory: apps/web + run: npm run build + - name: Install desktop dependencies working-directory: apps/desktop run: npm ci diff --git a/apps/api/app/api.py b/apps/api/app/api.py index bf88a331..2f7de915 100644 --- a/apps/api/app/api.py +++ b/apps/api/app/api.py @@ -11,28 +11,25 @@ from datetime import datetime, timedelta, timezone from functools import lru_cache from pathlib import Path -from typing import Annotated, Any, List, Optional -from uuid import uuid4 - -try: - from celery import Celery -except ModuleNotFoundError: # pragma: no cover - allows API tests without optional celery install - class Celery: # type: ignore[override] - def __init__(self, *args, **kwargs): - pass - - def send_task(self, *_args, **_kwargs): - raise 
RuntimeError("Celery is not installed in this environment.") -from fastapi import APIRouter, Depends, File, Form, Header, Query, Request, UploadFile, status, Response -from uuid import UUID +from typing import Any, Iterable, List, Optional, Set +from uuid import UUID, uuid4 +from fastapi import APIRouter, Depends, File, Form, Header, Query, Request, Response, UploadFile, status +from fastapi.responses import FileResponse, StreamingResponse from sqlmodel import Field, Session, SQLModel, select +from typing_extensions import Annotated from app.auth_api import PrincipalDep, ensure_default_plans from app.billing import get_plan_policy -from app.database import get_session from app.config import get_settings +from app.database import get_session from app.errors import ApiError, ErrorCode, ErrorResponse, conflict, not_found, quota_exceeded, server_error, unauthorized +from app.local_queue import ( + diagnostics as local_queue_diagnostics, + dispatch_task as dispatch_local_task, + is_local_queue_mode, + revoke_task as revoke_local_task, +) from app.models import ( Job, JobStatus, @@ -52,11 +49,76 @@ def send_task(self, *_args, **_kwargs): ) from app.rate_limit import enforce_rate_limit from app.security import AuthPrincipal -from fastapi.responses import FileResponse, StreamingResponse - from app.share_links import build_share_token_with_ttl, parse_and_validate_share_token from app.storage import LocalStorageBackend, get_storage, is_remote_uri +try: + from celery import Celery as _RealCelery +except ModuleNotFoundError: # pragma: no cover - allows API tests without optional celery install + _RealCelery = None + + +_MISSING_CELERY_MESSAGE = "Celery is not installed in this environment." 
+ + +class _MissingCeleryControl: + @staticmethod + def ping(*_args, **_kwargs): + raise RuntimeError(_MISSING_CELERY_MESSAGE) + + @staticmethod + def revoke(*_args, **_kwargs): + raise RuntimeError(_MISSING_CELERY_MESSAGE) + + +class _MissingCelery: + def __init__(self, *args, **kwargs): + self.control = _MissingCeleryControl() + + @staticmethod + def send_task(*_args, **_kwargs): + raise RuntimeError(_MISSING_CELERY_MESSAGE) + + +Celery = _RealCelery or _MissingCelery +try: + from kombu.exceptions import OperationalError as _KombuOperationalError +except ModuleNotFoundError: # pragma: no cover - optional dependency + _KombuOperationalError = RuntimeError + +try: + from redis.exceptions import ConnectionError as _RedisConnectionError +except ModuleNotFoundError: # pragma: no cover - optional dependency + _RedisConnectionError = ConnectionError + +KombuOperationalError = _KombuOperationalError +RedisConnectionError = _RedisConnectionError + +_CELERY_BOOTSTRAP_EXCEPTIONS = ( + RuntimeError, + ValueError, + TypeError, + AttributeError, + OSError, + ImportError, + ModuleNotFoundError, + KombuOperationalError, + RedisConnectionError, + ConnectionError, +) + +_CELERY_RUNTIME_EXCEPTIONS = ( + RuntimeError, + TimeoutError, + ValueError, + TypeError, + AttributeError, + OSError, + KombuOperationalError, + RedisConnectionError, + ConnectionError, +) + router = APIRouter(prefix="/api/v1") logger = logging.getLogger("reframe.api") _DEFAULT_BINARY_MEDIA_TYPE = "application/octet-stream" @@ -66,9 +128,9 @@ def send_task(self, *_args, **_kwargs): @lru_cache(maxsize=1) -def get_celery_app() -> Celery: +def get_celery_app() -> Any: settings = get_settings() - app = Celery("reframe_api", broker=settings.broker_url, backend=settings.result_backend) + app: Any = Celery("reframe_api", broker=settings.broker_url, backend=settings.result_backend) # Fail fast when broker/backend are unavailable so API diagnostics and tests do not hang. 
app.conf.broker_connection_retry_on_startup = False app.conf.broker_connection_max_retries = 0 @@ -126,6 +188,8 @@ def _resolve_task_queue(task_name: str, *args) -> str: def enqueue_job(job: Job, task_name: str, *args) -> str: try: queue = _resolve_task_queue(task_name, *args) + if is_local_queue_mode(): + return dispatch_local_task(task_name, *args, queue=queue) result = get_celery_app().send_task(task_name, args=args, queue=queue) return result.id except Exception as exc: # pragma: no cover - defensive @@ -221,7 +285,15 @@ def _safe_redirect_url(url: str) -> str: def _safe_local_asset_path(*, media_root: str, uri: str) -> Path: media_root_path = Path(media_root).resolve() - candidate = LocalStorageBackend(media_root=media_root_path).resolve_local_path(uri or "") + try: + candidate = LocalStorageBackend(media_root=media_root_path).resolve_local_path(uri or "") + except ValueError as exc: + raise ApiError( + status_code=status.HTTP_403_FORBIDDEN, + code=ErrorCode.PERMISSION_DENIED, + message="Asset path escapes media root", + details={"uri": uri}, + ) from exc resolved = candidate.resolve(strict=False) try: resolved.relative_to(media_root_path) @@ -627,34 +699,70 @@ def _truthy_env(name: str) -> bool: return raw in {"1", "true", "yes", "on"} +def _append_diag_error(existing: str | None, message: str) -> str: + return f"{existing}; {message}" if existing else message + + +def _populate_worker_diag_local_queue(worker_diag: WorkerDiagnostics) -> None: + diag = local_queue_diagnostics() + worker_diag.ping_ok = bool(diag.get("ping_ok")) + worker_diag.workers = sorted({str(item) for item in (diag.get("workers") or []) if item}) + worker_diag.system_info = diag.get("system_info") + worker_diag.error = str(diag.get("error")) if diag.get("error") else None + + +def _iter_worker_pongs(pongs: object) -> Iterable[object]: + if isinstance(pongs, (list, tuple)): + return pongs + return () + + +def _collect_celery_worker_names(pongs: object) -> List[str]: + names: Set[str] = 
set() + for item in _iter_worker_pongs(pongs): + if isinstance(item, dict): + names.update(str(name) for name in item.keys() if name) + return sorted(names) + + +def _populate_worker_diag_celery(worker_diag: WorkerDiagnostics) -> None: + try: + app = get_celery_app() + except _CELERY_BOOTSTRAP_EXCEPTIONS as exc: # pragma: no cover - best effort + worker_diag.error = f"Celery unavailable: {exc}" + return + + try: + pongs = app.control.ping(timeout=1.0) + worker_diag.workers = _collect_celery_worker_names(pongs) + worker_diag.ping_ok = bool(worker_diag.workers) + except _CELERY_RUNTIME_EXCEPTIONS as exc: + worker_diag.error = f"Worker ping failed: {exc}" + return + + if not worker_diag.ping_ok: + return + + try: + res = app.send_task("tasks.system_info") + worker_diag.system_info = res.get(timeout=3.0) + except _CELERY_RUNTIME_EXCEPTIONS as exc: + worker_diag.error = _append_diag_error( + worker_diag.error, + f"Worker diagnostics task failed: {exc}", + ) + + @router.get("/system/status", response_model=SystemStatusResponse, tags=["System"]) def system_status() -> SystemStatusResponse: settings = get_settings() storage = get_storage(media_root=settings.media_root) worker_diag = WorkerDiagnostics() - try: - app = get_celery_app() - try: - pongs = app.control.ping(timeout=1.0) - workers = [] - for item in pongs or []: - if isinstance(item, dict): - workers.extend(item.keys()) - worker_diag.workers = sorted(set(workers)) - worker_diag.ping_ok = bool(worker_diag.workers) - except Exception as exc: - worker_diag.error = f"Worker ping failed: {exc}" - - if worker_diag.ping_ok: - try: - res = app.send_task("tasks.system_info") - worker_diag.system_info = res.get(timeout=3.0) - except Exception as exc: - msg = f"Worker diagnostics task failed: {exc}" - worker_diag.error = f"{worker_diag.error}; {msg}" if worker_diag.error else msg - except Exception as exc: # pragma: no cover - best effort - worker_diag.error = f"Celery unavailable: {exc}" + if is_local_queue_mode(): + 
_populate_worker_diag_local_queue(worker_diag) + else: + _populate_worker_diag_celery(worker_diag) return SystemStatusResponse( api_version=settings.api_version, @@ -1628,8 +1736,11 @@ def create_workflow_run(payload: WorkflowRunCreateRequest, session: SessionDep, session.commit() try: - result = get_celery_app().send_task("tasks.run_workflow_pipeline", args=[str(run.id)]) - run.task_id = result.id + if is_local_queue_mode(): + run.task_id = dispatch_local_task("tasks.run_workflow_pipeline", str(run.id), queue=_celery_queue_name("CPU")) + else: + result = get_celery_app().send_task("tasks.run_workflow_pipeline", args=[str(run.id)]) + run.task_id = result.id session.add(run) session.commit() session.refresh(run) @@ -1671,7 +1782,10 @@ def cancel_workflow_run(run_id: UUID, session: SessionDep, principal: PrincipalD session.add(run) if run.task_id: try: - get_celery_app().control.revoke(run.task_id, terminate=False) + if is_local_queue_mode(): + revoke_local_task(run.task_id) + else: + get_celery_app().control.revoke(run.task_id, terminate=False) except Exception: pass pending_steps = session.exec( @@ -2857,6 +2971,7 @@ async def upload_asset( tmp_dir.mkdir(parents=True, exist_ok=True) tmp_path = tmp_dir / filename total = 0 + exceeded = False with tmp_path.open("wb") as out: while True: chunk = await file.read(1024 * 1024) @@ -2864,15 +2979,19 @@ async def upload_asset( break total += len(chunk) if max_bytes and total > max_bytes: - tmp_path.unlink(missing_ok=True) - raise ApiError( - status_code=status.HTTP_413_CONTENT_TOO_LARGE, - code=ErrorCode.VALIDATION_ERROR, - message="Upload too large", - details={"max_upload_bytes": max_bytes, "uploaded_bytes": total}, - ) + exceeded = True + break out.write(chunk) + if exceeded: + tmp_path.unlink(missing_ok=True) + raise ApiError( + status_code=status.HTTP_413_CONTENT_TOO_LARGE, + code=ErrorCode.VALIDATION_ERROR, + message="Upload too large", + details={"max_upload_bytes": max_bytes, "uploaded_bytes": total}, + ) + rel_dir 
= _scoped_tmp_rel_dir(storage, principal) uri = storage.write_file(rel_dir=rel_dir, filename=filename, source_path=tmp_path, content_type=file.content_type) if not isinstance(storage, LocalStorageBackend): @@ -3015,3 +3134,4 @@ def download_asset(asset_id: UUID, session: SessionDep, principal: PrincipalDep) def list_style_presets(session: SessionDep, principal: PrincipalDep) -> List[SubtitleStylePreset]: presets = session.exec(select(SubtitleStylePreset)).all() return presets + diff --git a/apps/api/app/config.py b/apps/api/app/config.py index 032ba505..fbf9e4ff 100644 --- a/apps/api/app/config.py +++ b/apps/api/app/config.py @@ -109,6 +109,11 @@ class Settings(BaseSettings): validation_alias=AliasChoices("APP_BASE_URL", "REFRAME_APP_BASE_URL"), description="Public frontend URL used for OAuth/billing redirects.", ) + desktop_web_dist: str = Field( + default="", + validation_alias=AliasChoices("DESKTOP_WEB_DIST", "REFRAME_DESKTOP_WEB_DIST"), + description="Optional absolute path to built desktop web assets mounted at '/'.", + ) api_base_url: str = Field( default="http://localhost:8000", validation_alias=AliasChoices("API_BASE_URL", "REFRAME_API_BASE_URL"), diff --git a/apps/api/app/local_queue.py b/apps/api/app/local_queue.py new file mode 100644 index 00000000..0b8c76ac --- /dev/null +++ b/apps/api/app/local_queue.py @@ -0,0 +1,109 @@ +from __future__ import absolute_import + +import os +from concurrent.futures import Future, ThreadPoolExecutor +from functools import lru_cache +from threading import Lock +from typing import Any, Dict, Optional, Tuple +from uuid import uuid4 + + +def _truthy(value: Optional[str]) -> bool: + return (value or "").strip().lower() in {"1", "true", "yes", "on"} + + +def is_local_queue_mode() -> bool: + return _truthy(os.getenv("REFRAME_LOCAL_QUEUE_MODE") or os.getenv("LOCAL_QUEUE_MODE")) + + +@lru_cache(maxsize=1) +def _executor() -> ThreadPoolExecutor: + workers_raw = (os.getenv("REFRAME_LOCAL_QUEUE_WORKERS") or "4").strip() + try: + 
workers = max(1, int(workers_raw)) + except ValueError: + workers = 4 + return ThreadPoolExecutor(max_workers=workers, thread_name_prefix="reframe-local-queue") + + +@lru_cache(maxsize=1) +def _worker_tasks() -> Dict[str, Any]: + from services.worker import worker as worker_module + + # Celery task registry gives us the same task names that send_task dispatches. + return dict(worker_module.celery_app.tasks) + + +_pending_lock = Lock() +_pending: Dict[str, Future] = {} + + +def _run_task(task_name: str, args: Tuple[Any, ...]) -> Any: + tasks = _worker_tasks() + task = tasks.get(task_name) + if task is None: + raise RuntimeError(f"Local queue task not found: {task_name}") + return task.run(*args) + + +def dispatch_task(task_name: str, *args: Any, queue: Optional[str] = None) -> str: + if not is_local_queue_mode(): + raise RuntimeError("Local queue mode is not enabled") + + task_id = f"local-{uuid4()}" + + def _wrapped() -> None: + _run_task(task_name, args) + + future = _executor().submit(_wrapped) + with _pending_lock: + _pending[task_id] = future + + def _cleanup(_fut: Future) -> None: + with _pending_lock: + _pending.pop(task_id, None) + + future.add_done_callback(_cleanup) + return task_id + + +def revoke_task(task_id: str) -> bool: + with _pending_lock: + future = _pending.get(task_id) + if future is None: + return False + return future.cancel() + + +def diagnostics() -> Dict[str, Any]: + if not is_local_queue_mode(): + return { + "ping_ok": False, + "workers": [], + "system_info": None, + "error": "Local queue mode is disabled", + } + + info: Optional[Dict[str, Any]] = None + error: Optional[str] = None + try: + task = _worker_tasks().get("tasks.system_info") + if task is None: + raise RuntimeError("tasks.system_info is unavailable") + info = task.run() + except (RuntimeError, ValueError, TypeError, AttributeError, OSError) as exc: # pragma: no cover - defensive + error = f"Local diagnostics failed: {exc}" + + with _pending_lock: + queued = len(_pending) + + 
workers = ["local-queue"] + if queued > 0: + workers.append(f"pending:{queued}") + + return { + "ping_ok": True, + "workers": workers, + "system_info": info, + "error": error, + } diff --git a/apps/api/app/main.py b/apps/api/app/main.py index 1e4093fb..f1d03d28 100644 --- a/apps/api/app/main.py +++ b/apps/api/app/main.py @@ -1,25 +1,83 @@ +from __future__ import division + import logging +import os +import stat import time from contextlib import asynccontextmanager from pathlib import Path from uuid import uuid4 from fastapi import FastAPI +from fastapi import HTTPException from fastapi import Request -from fastapi.staticfiles import StaticFiles +from fastapi.responses import FileResponse from fastapi.responses import JSONResponse +from fastapi.staticfiles import StaticFiles -from app.config import get_settings -from app.database import create_db_and_tables from app.api import router as api_router from app.auth_api import router as auth_router from app.billing_api import router as billing_router +from app.cleanup import start_cleanup_loop from app.collaboration_api import router as collaboration_router -from app.identity_api import router as identity_router -from app.publish_api import router as publish_router +from app.config import get_settings +from app.database import create_db_and_tables from app.errors import ApiError, ErrorResponse -from app.cleanup import start_cleanup_loop +from app.identity_api import router as identity_router from app.logging_config import setup_logging +from app.publish_api import router as publish_router + + +_RESERVED_DESKTOP_PREFIXES = ( + "api", + "docs", + "openapi.json", + "redoc", + "media", + "health", + "healthz", +) +def _is_reserved_desktop_path(normalized: str) -> bool: + return any( + normalized == reserved or normalized.startswith(f"{reserved}/") + for reserved in _RESERVED_DESKTOP_PREFIXES + ) + + +def _has_path_traversal(normalized: str) -> bool: + segments = [part for part in normalized.replace("\\", "/").split("/") if 
part] + return any(part == ".." for part in segments) + + +def _mount_desktop_web(api_app: FastAPI, desktop_web_dist: str) -> None: + raw = (desktop_web_dist or "").strip() + if not raw: + return + + web_dist = Path(raw).resolve() + index_path = web_dist / "index.html" + if not os.path.isfile(index_path): + return + static_files = StaticFiles(directory=str(web_dist), check_dir=False) + + @api_app.get("/", include_in_schema=False) + def desktop_index() -> FileResponse: + return FileResponse(index_path) + + _ = desktop_index + + @api_app.get("/{full_path:path}", include_in_schema=False, responses={404: {"description": "Not Found"}}) + def desktop_spa(full_path: str) -> FileResponse: + normalized = (full_path or "").lstrip("/") + if _has_path_traversal(normalized) or _is_reserved_desktop_path(normalized): + raise HTTPException(status_code=404) + + candidate, stat_result = static_files.lookup_path(normalized) + if stat_result is not None and stat.S_ISREG(stat_result.st_mode): + return FileResponse(candidate) + return FileResponse(index_path) + + _ = desktop_spa def create_app() -> FastAPI: @@ -110,6 +168,8 @@ async def api_error_handler(_, exc: ApiError): def health() -> dict[str, str]: return {"status": "ok", "version": settings.api_version} + _mount_desktop_web(app, settings.desktop_web_dist) + return app diff --git a/apps/api/app/publish_api.py b/apps/api/app/publish_api.py index 404a7c9a..40477f5b 100644 --- a/apps/api/app/publish_api.py +++ b/apps/api/app/publish_api.py @@ -21,6 +21,7 @@ def send_task(self, *_args, **_kwargs): from app.auth_api import PrincipalDep from app.config import get_settings +from app.local_queue import dispatch_task as dispatch_local_task, is_local_queue_mode from app.database import get_session from app.errors import ApiError, ErrorCode, ErrorResponse, conflict, not_found, unauthorized from app.models import AutomationRunEvent, MediaAsset, PublishConnection, PublishJob @@ -179,6 +180,8 @@ def _celery_app() -> Celery: def 
_dispatch_publish_task(job: PublishJob) -> str: + if is_local_queue_mode(): + return dispatch_local_task("tasks.publish_asset", str(job.id)) result = _celery_app().send_task("tasks.publish_asset", args=[str(job.id)]) return result.id diff --git a/apps/api/tests/conftest.py b/apps/api/tests/conftest.py index 584ffef3..678d92ca 100644 --- a/apps/api/tests/conftest.py +++ b/apps/api/tests/conftest.py @@ -39,7 +39,7 @@ def test_client(monkeypatch: pytest.MonkeyPatch, tmp_path: Path): media_root.mkdir(parents=True, exist_ok=True) db_path = tmp_path / "reframe-test.db" - db_url = f"sqlite:////{str(db_path).lstrip('/')}" + db_url = f"sqlite:///{db_path.as_posix()}" monkeypatch.setenv("DATABASE_URL", db_url) monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) diff --git a/apps/api/tests/test_api_internal_helpers.py b/apps/api/tests/test_api_internal_helpers.py new file mode 100644 index 00000000..55825d4c --- /dev/null +++ b/apps/api/tests/test_api_internal_helpers.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +import asyncio +from datetime import datetime, timezone +from pathlib import Path +from types import SimpleNamespace +from uuid import uuid4 + +import pytest +from sqlmodel import select + +from app import api +from app.errors import ApiError +from app.models import Job, OrgBudgetPolicy + + +def test_queue_and_gpu_helpers(monkeypatch): + monkeypatch.setenv("REFRAME_ENABLE_GPU_QUEUE", "true") + monkeypatch.setenv("REFRAME_ASSUME_GPU_FOR_TRANSCRIBE_BACKENDS", "true") + monkeypatch.setenv("REFRAME_CELERY_QUEUE_GPU", "gpuq") + monkeypatch.setenv("REFRAME_CELERY_QUEUE_CPU", "cpuq") + monkeypatch.setenv("REFRAME_CELERY_QUEUE_DEFAULT", "defq") + + assert api._env_truthy("ENABLE_GPU_QUEUE") is True + assert api._celery_queue_name("GPU") == "gpuq" + assert api._celery_queue_name("CPU") == "cpuq" + assert api._celery_queue_name("DEFAULT") == "defq" + + assert api._task_prefers_gpu("tasks.generate_captions", {"backend": "faster_whisper"}) is True + assert 
api._task_prefers_gpu("tasks.transcribe_video", {"device": "cuda"}) is True + assert api._task_prefers_gpu("tasks.merge_video_audio", {}) is False + + assert api._resolve_task_queue("tasks.generate_captions", {"backend": "faster_whisper"}) == "gpuq" + assert api._resolve_task_queue("tasks.generate_shorts", {}) == "cpuq" + assert api._resolve_task_queue("tasks.unknown", {}) == "defq" + + +def test_scope_and_org_access_helpers(): + org_id = uuid4() + principal = SimpleNamespace(org_id=org_id) + query = select(Job) + scoped = api._scope_query_by_org(query, Job, principal) + assert "org_id" in str(scoped) + + api._assert_org_access(principal=principal, entity_org_id=org_id, entity="job", entity_id="1") + + with pytest.raises(ApiError): + api._assert_org_access(principal=principal, entity_org_id=uuid4(), entity="job", entity_id="2") + + +def test_idempotency_and_redirect_helpers(monkeypatch): + assert api._resolve_idempotency_key(" abc ", None) == "abc" + assert api._resolve_idempotency_key(None, "hdr") == "hdr" + assert api._resolve_idempotency_key("", "") is None + + with pytest.raises(ApiError): + api._resolve_idempotency_key("x" * 129, None) + + assert api._is_forbidden_ip_host("127.0.0.1") is True + assert api._is_forbidden_ip_host("8.8.8.8") is False + + assert api._safe_redirect_url("https://example.com/file.txt#frag") == "https://example.com/file.txt" + + with pytest.raises(ApiError): + api._safe_redirect_url("http://example.com/file.txt") + with pytest.raises(ApiError): + api._safe_redirect_url("https://user:pass@example.com/file.txt") + with pytest.raises(ApiError): + api._safe_redirect_url("https://localhost/file.txt") + with pytest.raises(ApiError): + api._safe_redirect_url("https://127.0.0.1/file.txt") + + +def test_local_asset_stream_and_path_helpers(tmp_path: Path): + media_root = tmp_path / "media" + media_root.mkdir(parents=True, exist_ok=True) + file_path = media_root / "a.bin" + file_path.write_bytes(b"hello") + + resolved = 
api._safe_local_asset_path(media_root=str(media_root), uri="a.bin") + assert resolved == file_path + + with pytest.raises(ApiError): + api._safe_local_asset_path(media_root=str(media_root), uri="../escape.bin") + + response = api._stream_local_file(file_path=file_path, mime_type="application/octet-stream") + assert response.headers["Content-Disposition"].startswith("attachment;") + + async def _collect() -> bytes: + data = bytearray() + async for chunk in response.body_iterator: + data.extend(chunk) + return bytes(data) + + assert asyncio.run(_collect()) == b"hello" + + +def test_cost_budget_and_datetime_helpers(monkeypatch): + assert api._coerce_non_negative_float("12.5") == 12.5 + assert api._coerce_non_negative_float("-1") == 0.0 + assert api._coerce_non_negative_float("2", scale=0.5) == 1.0 + + assert api._extract_estimated_minutes({"expected_minutes": 7}) == 7.0 + assert api._extract_estimated_minutes({"duration_seconds": 180}) == 3.0 + assert api._extract_estimated_minutes({}) == 0.0 + + assert api._estimate_job_submission_cost_cents(job_type="captions", payload={"duration_seconds": 120}) == 29 + assert api._estimate_job_submission_cost_cents(job_type="unknown", payload={"estimated_cost_cents": 17}) == 17 + + assert api._optional_int(None) is None + assert api._optional_int("8") == 8 + + assert api._budget_projected_status(current_month_estimated_cost_cents=15, soft_limit=20, hard_limit=30) == "ok" + assert api._budget_projected_status(current_month_estimated_cost_cents=25, soft_limit=20, hard_limit=30) == "soft_limit_exceeded" + assert api._budget_projected_status(current_month_estimated_cost_cents=35, soft_limit=20, hard_limit=30) == "hard_limit_exceeded" + + dt_naive = datetime(2026, 3, 1, 1, 2, 3) + aware = api._coerce_aware_datetime(dt_naive) + assert aware is not None and aware.tzinfo is not None + + dt_aware = datetime(2026, 3, 1, 1, 2, 3, tzinfo=timezone.utc) + assert api._coerce_aware_datetime(dt_aware) == dt_aware + + assert 
api._coerce_aware_datetime(None) is None + + principal_admin = SimpleNamespace(role="admin") + principal_owner = SimpleNamespace(role="owner") + principal_member = SimpleNamespace(role="member") + api._require_org_manager_role(principal_admin) + api._require_org_manager_role(principal_owner) + with pytest.raises(ApiError): + api._require_org_manager_role(principal_member) + + policy = OrgBudgetPolicy( + org_id=uuid4(), + monthly_soft_limit_cents=100, + monthly_hard_limit_cents=150, + enforce_hard_limit=True, + ) + view = api._serialize_budget_policy( + policy=policy, + org_id=policy.org_id, + current_month_estimated_cost_cents=120, + ) + assert view.projected_status == "soft_limit_exceeded" + + assert api._month_start_utc().day == 1 + diff --git a/apps/api/tests/test_coverage_wave_api_helpers.py b/apps/api/tests/test_coverage_wave_api_helpers.py new file mode 100644 index 00000000..425a6d08 --- /dev/null +++ b/apps/api/tests/test_coverage_wave_api_helpers.py @@ -0,0 +1,406 @@ +from __future__ import annotations + +import builtins +import logging +import sys +import os +import threading +import time +from types import SimpleNamespace + +import pytest + +from app import billing, cleanup, local_queue, logging_config, storage + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +@pytest.fixture(autouse=True) +def _reset_local_queue_state(monkeypatch): + local_queue._executor.cache_clear() + if hasattr(local_queue._worker_tasks, "cache_clear"): + local_queue._worker_tasks.cache_clear() + with local_queue._pending_lock: + local_queue._pending.clear() + monkeypatch.delenv("REFRAME_LOCAL_QUEUE_MODE", raising=False) + monkeypatch.delenv("LOCAL_QUEUE_MODE", raising=False) + monkeypatch.delenv("REFRAME_LOCAL_QUEUE_WORKERS", raising=False) + yield + local_queue._executor.cache_clear() + if hasattr(local_queue._worker_tasks, "cache_clear"): + local_queue._worker_tasks.cache_clear() + with local_queue._pending_lock: + 
local_queue._pending.clear() + + +def test_local_queue_truthy_and_mode_detection(monkeypatch): + _expect(local_queue._truthy("1"), "Expected truthy helper to treat 1 as true") + _expect(not local_queue._truthy("0"), "Expected truthy helper to treat 0 as false") + _expect(not local_queue.is_local_queue_mode(), "Expected local queue mode disabled by default") + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + _expect(local_queue.is_local_queue_mode(), "Expected local queue mode via REFRAME_LOCAL_QUEUE_MODE") + monkeypatch.delenv("REFRAME_LOCAL_QUEUE_MODE", raising=False) + monkeypatch.setenv("LOCAL_QUEUE_MODE", "yes") + _expect(local_queue.is_local_queue_mode(), "Expected local queue mode via LOCAL_QUEUE_MODE") + + +def test_local_queue_dispatch_and_revoke(monkeypatch): + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + calls: list[tuple[str, tuple[object, ...]]] = [] + ready = threading.Event() + + def fake_run_task(task_name: str, args: tuple[object, ...]) -> None: + calls.append((task_name, args)) + ready.set() + + monkeypatch.setattr(local_queue, "_run_task", fake_run_task) + + task_id = local_queue.dispatch_task("tasks.echo", "hello", queue="high") + _expect(task_id.startswith("local-"), "Expected local queue task id prefix") + _expect(ready.wait(timeout=2), "Expected dispatched task to execute") + + for _ in range(20): + with local_queue._pending_lock: + if task_id not in local_queue._pending: + break + time.sleep(0.02) + + _expect(calls == [("tasks.echo", ("hello",))], "Expected _run_task dispatch call") + _expect(not local_queue.revoke_task("missing"), "Expected revoke false for missing task") + + +def test_local_queue_dispatch_requires_enabled(): + with pytest.raises(RuntimeError): + local_queue.dispatch_task("tasks.echo") + + +def test_local_queue_diagnostics_enabled_and_error_paths(monkeypatch): + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + + monkeypatch.setattr( + local_queue, + "_worker_tasks", + lambda: {"tasks.system_info": 
SimpleNamespace(run=lambda: {"ffmpeg": {"present": True}})}, + ) + payload = local_queue.diagnostics() + _expect(payload["ping_ok"] is True, "Expected diagnostics ping ok") + _expect(payload["system_info"] == {"ffmpeg": {"present": True}}, "Expected system_info payload") + _expect(payload["error"] is None, "Expected no diagnostics error") + + monkeypatch.setattr(local_queue, "_worker_tasks", lambda: {}) + payload_no_task = local_queue.diagnostics() + _expect(payload_no_task["ping_ok"] is True, "Expected diagnostics ping true in local mode") + _expect(payload_no_task["system_info"] is None, "Expected missing system_info") + _expect("unavailable" in str(payload_no_task["error"]), "Expected unavailable error message") + + monkeypatch.delenv("REFRAME_LOCAL_QUEUE_MODE", raising=False) + disabled = local_queue.diagnostics() + _expect(disabled["ping_ok"] is False, "Expected disabled diagnostics ping false") + _expect("disabled" in str(disabled["error"]).lower(), "Expected disabled diagnostics error") + + +def test_local_storage_backend_file_lifecycle(tmp_path): + backend = storage.LocalStorageBackend(media_root=tmp_path, public_prefix="/media") + uri = backend.write_bytes(rel_dir="org-a/tmp", filename="hello.txt", data=b"hello") + _expect(uri == "/media/org-a/tmp/hello.txt", "Expected media URI for written bytes") + local_path = backend.resolve_local_path(uri) + _expect(local_path.read_bytes() == b"hello", "Expected file contents after write_bytes") + + source = tmp_path / "source.bin" + source.write_bytes(b"abc") + uri_file = backend.write_file(rel_dir="org-a/out", filename="copy.bin", source_path=source) + _expect(uri_file == "/media/org-a/out/copy.bin", "Expected media URI for write_file") + _expect(backend.get_download_url(uri_file) == uri_file, "Expected direct URI for local download") + _expect(backend.resolve_local_path("/media/org-a/out/copy.bin").read_bytes() == b"abc", "Expected copied bytes") + + backend.delete_uri(uri_file) + _expect(not 
backend.resolve_local_path(uri_file).exists(), "Expected deleted URI to remove file") + + with pytest.raises(ValueError): + backend.resolve_local_path("/media/../../escape.txt") + with pytest.raises(ValueError): + backend.resolve_local_path("https://example.test/file.bin") + with pytest.raises(ValueError): + backend.create_presigned_upload(rel_dir="a", filename="b", content_type=None, expires_seconds=60) + with pytest.raises(ValueError): + backend.create_multipart_upload(rel_dir="a", filename="b", content_type=None) + with pytest.raises(ValueError): + backend.sign_multipart_part(key="k", provider_upload_id="u", part_number=1, expires_seconds=60) + with pytest.raises(ValueError): + backend.complete_multipart_upload(key="k", provider_upload_id="u", parts=[]) + with pytest.raises(ValueError): + backend.abort_multipart_upload(key="k", provider_upload_id="u") + + +def test_storage_helpers_and_get_storage_modes(monkeypatch, tmp_path): + _expect(storage.is_remote_uri("https://example.test/a"), "Expected https URI to be treated as remote") + _expect(storage.is_remote_uri("s3://bucket/key"), "Expected s3 URI to be treated as remote") + _expect(not storage.is_remote_uri("/media/a"), "Expected local path to be non-remote") + _expect(storage._join_key("/a/", "b", "c/") == "a/b/c", "Expected normalized key join") + + monkeypatch.setenv("S3_BUCKET", "") + monkeypatch.setenv("REFRAME_STORAGE_BACKEND", "local") + local_backend = storage.get_storage(media_root=tmp_path) + _expect(isinstance(local_backend, storage.LocalStorageBackend), "Expected local storage backend") + + monkeypatch.setenv("REFRAME_STORAGE_BACKEND", "unknown-backend") + with pytest.raises(ValueError): + storage.get_storage(media_root=tmp_path) + + monkeypatch.setenv("REFRAME_STORAGE_BACKEND", "s3") + monkeypatch.setenv("REFRAME_OFFLINE_MODE", "true") + with pytest.raises(RuntimeError): + storage.get_storage(media_root=tmp_path) + + +def test_s3_storage_backend_core_paths(monkeypatch, tmp_path): + class FakeClient: 
+ def __init__(self): + self.calls: list[tuple[str, tuple, dict]] = [] + + def put_object(self, **kwargs): + self.calls.append(("put_object", (), kwargs)) + + def upload_file(self, *args, **kwargs): + self.calls.append(("upload_file", args, kwargs)) + + def generate_presigned_url(self, op, Params=None, ExpiresIn=None): + self.calls.append(("generate_presigned_url", (op,), {"Params": Params, "ExpiresIn": ExpiresIn})) + return f"https://upload.example/{op}" + + def create_multipart_upload(self, **kwargs): + self.calls.append(("create_multipart_upload", (), kwargs)) + return {"UploadId": "upload-1"} + + def complete_multipart_upload(self, **kwargs): + self.calls.append(("complete_multipart_upload", (), kwargs)) + + def abort_multipart_upload(self, **kwargs): + self.calls.append(("abort_multipart_upload", (), kwargs)) + + def delete_object(self, **kwargs): + self.calls.append(("delete_object", (), kwargs)) + + fake_client = FakeClient() + + class FakeSession: + def client(self, *_args, **_kwargs): + return fake_client + + class FakeSessionFactory: + def Session(self, **_kwargs): + return FakeSession() + + monkeypatch.setattr(storage, "_ensure_boto3", lambda: SimpleNamespace(session=FakeSessionFactory())) + + backend = storage.S3StorageBackend( + bucket="bucket-a", + prefix="tenant", + endpoint_url="https://s3.example.test", + public_base_url="https://cdn.example.test/assets", + public_downloads=True, + presign_expires_seconds=300, + ) + + src = tmp_path / "in.bin" + src.write_bytes(b"data") + + uri = backend.write_bytes(rel_dir="org", filename="bytes.bin", data=b"1", content_type="application/octet-stream") + _expect(uri == "s3://bucket-a/tenant/org/bytes.bin", "Expected S3 URI for write_bytes") + uri_file = backend.write_file(rel_dir="org", filename="file.bin", source_path=src, content_type="application/octet-stream") + _expect(uri_file == "s3://bucket-a/tenant/org/file.bin", "Expected S3 URI for write_file") + _expect(backend.get_download_url(uri_file) == 
"https://cdn.example.test/assets/tenant/org/file.bin", "Expected public download URL path") + _expect(backend.get_download_url("s3://other-bucket/file") is None, "Expected None for foreign-bucket URI") + + presigned = backend.create_presigned_upload( + rel_dir="org", + filename="upload.bin", + content_type="application/octet-stream", + expires_seconds=120, + ) + _expect(presigned["method"] == "PUT", "Expected PUT method for presigned upload") + + multi = backend.create_multipart_upload(rel_dir="org", filename="multi.bin", content_type=None) + _expect(multi["upload_id"] == "upload-1", "Expected multipart upload id") + + part = backend.sign_multipart_part( + key=multi["key"], + provider_upload_id=multi["upload_id"], + part_number=1, + expires_seconds=60, + ) + _expect(part["method"] == "PUT", "Expected multipart part PUT upload") + + backend.complete_multipart_upload( + key=multi["key"], + provider_upload_id=multi["upload_id"], + parts=[{"part_number": 2, "etag": "b"}, {"part_number": 1, "etag": "a"}], + ) + with pytest.raises(ValueError): + backend.complete_multipart_upload(key=multi["key"], provider_upload_id=multi["upload_id"], parts=[{"part_number": 0}]) + + backend.abort_multipart_upload(key=multi["key"], provider_upload_id=multi["upload_id"]) + backend.delete_uri(uri) + + with pytest.raises(ValueError): + backend.resolve_local_path(uri) + + ops = [name for name, _args, _kwargs in fake_client.calls] + _expect("put_object" in ops, "Expected put_object call") + _expect("upload_file" in ops, "Expected upload_file call") + _expect("create_multipart_upload" in ops, "Expected create_multipart_upload call") + _expect("complete_multipart_upload" in ops, "Expected complete_multipart_upload call") + _expect("abort_multipart_upload" in ops, "Expected abort_multipart_upload call") + + +def test_json_formatter_and_setup_logging_paths(): + formatter = logging_config.JsonFormatter() + + try: + raise RuntimeError("boom") + except RuntimeError: + record = logging.LogRecord( + 
name="reframe.test", + level=logging.ERROR, + pathname=__file__, + lineno=1, + msg="failure: %s", + args=("x",), + exc_info=sys.exc_info(), + ) + record.user_id = "u-1" + rendered = formatter.format(record) + _expect('"message": "failure: x"' in rendered, "Expected rendered log message") + _expect('"user_id": "u-1"' in rendered, "Expected extra log field") + _expect("exc_info" in rendered, "Expected formatted exception info") + + logger = logging.getLogger("reframe") + setattr(logger, "_reframe_configured", False) + for handler in list(logger.handlers): + logger.removeHandler(handler) + + logging_config.setup_logging(log_format="plain", log_level="warning") + first_count = len(logger.handlers) + logging_config.setup_logging(log_format="json", log_level="debug") + _expect(len(logger.handlers) == first_count, "Expected setup logging to be idempotent") + + +def test_cleanup_old_files_and_loop_start(tmp_path): + target = tmp_path / "tmp" + target.mkdir(parents=True, exist_ok=True) + + old_file = target / "old.txt" + new_file = target / "new.txt" + old_file.write_text("old", encoding="utf-8") + new_file.write_text("new", encoding="utf-8") + + old_ts = time.time() - 10_000 + os.utime(old_file, (old_ts, old_ts)) + + cleanup._remove_old_files(target, older_than=cleanup.timedelta(seconds=1)) + _expect(not old_file.exists(), "Expected old file cleanup") + _expect(new_file.exists(), "Expected newer file to remain") + + thread = cleanup.start_cleanup_loop(str(tmp_path), interval_seconds=60, ttl_hours=24) + _expect(thread is not None, "Expected cleanup thread") + _expect(thread.daemon, "Expected cleanup loop thread daemonized") + _expect((tmp_path / "tmp").exists(), "Expected tmp directory creation") + + +def test_billing_plan_and_stripe_paths(monkeypatch): + free = billing.get_plan_policy("unknown-plan") + _expect(free.code == "free", "Expected free fallback policy") + _expect(billing.get_plan_policy("enterprise").seat_limit == 200, "Expected enterprise policy lookup") + + 
class _Settings: + enable_billing = False + stripe_secret_key = "" + + monkeypatch.setattr(billing, "get_settings", lambda: _Settings()) + with pytest.raises(RuntimeError): + billing.build_checkout_session( + customer_id=None, + price_id="price_x", + success_url="https://ok", + cancel_url="https://cancel", + ) + + class _SettingsEnabledNoKey: + enable_billing = True + stripe_secret_key = "" + + monkeypatch.setattr(billing, "get_settings", lambda: _SettingsEnabledNoKey()) + with pytest.raises(RuntimeError): + billing.build_customer_portal_session(customer_id="cus_1", return_url="https://ret") + + class _SettingsEnabled: + enable_billing = True + stripe_secret_key = "sk_test_123" + + checkout_calls: list[dict] = [] + modify_calls: list[tuple[str, dict]] = [] + portal_calls: list[dict] = [] + + class _CheckoutSession: + @staticmethod + def create(**kwargs): + checkout_calls.append(kwargs) + return {"id": "cs_1", "url": "https://checkout"} + + class _Subscription: + @staticmethod + def modify(sub_id: str, **kwargs): + modify_calls.append((sub_id, kwargs)) + + class _PortalSession: + @staticmethod + def create(**kwargs): + portal_calls.append(kwargs) + return {"id": "bps_1", "url": "https://portal"} + + fake_stripe = SimpleNamespace( + api_key=None, + checkout=SimpleNamespace(Session=_CheckoutSession), + Subscription=_Subscription, + billing_portal=SimpleNamespace(Session=_PortalSession), + ) + + monkeypatch.setattr(billing, "get_settings", lambda: _SettingsEnabled()) + monkeypatch.setattr(billing, "_get_stripe", lambda: fake_stripe) + + checkout = billing.build_checkout_session( + customer_id="cus_1", + price_id="price_1", + quantity=0, + success_url="https://ok", + cancel_url="https://cancel", + metadata={"org_id": "x"}, + ) + _expect(checkout["id"] == "cs_1", "Expected checkout id") + _expect(checkout_calls[0]["line_items"][0]["quantity"] == 1, "Expected quantity coercion to minimum 1") + + billing.update_subscription_seat_limit(subscription_id="sub_1", quantity=0) + 
_expect(modify_calls[0][0] == "sub_1", "Expected subscription id for seat update") + _expect(modify_calls[0][1]["items"][0]["quantity"] == 1, "Expected seat quantity minimum to 1") + + portal = billing.build_customer_portal_session(customer_id="cus_1", return_url="https://return") + _expect(portal["url"] == "https://portal", "Expected portal URL") + _expect(portal_calls[0]["customer"] == "cus_1", "Expected portal customer id") + + +def test_get_stripe_import_error(monkeypatch): + real_import = builtins.__import__ + + def fake_import(name, *args, **kwargs): + if name == "stripe": + raise ImportError("missing stripe") + return real_import(name, *args, **kwargs) + + monkeypatch.setattr(builtins, "__import__", fake_import) + with pytest.raises(RuntimeError): + billing._get_stripe() + + diff --git a/apps/api/tests/test_desktop_embedded_web.py b/apps/api/tests/test_desktop_embedded_web.py new file mode 100644 index 00000000..2d82819b --- /dev/null +++ b/apps/api/tests/test_desktop_embedded_web.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +from pathlib import Path + +from fastapi.testclient import TestClient + + +def _reset_settings_caches() -> None: + from app.api import get_celery_app + from app.config import get_settings + from app.database import get_engine + + get_settings.cache_clear() + get_engine.cache_clear() + get_celery_app.cache_clear() + + +def test_desktop_embedded_web_mount_serves_index_and_assets(monkeypatch, tmp_path: Path): + web_dist = tmp_path / "web-dist" + assets = web_dist / "assets" + assets.mkdir(parents=True, exist_ok=True) + (web_dist / "index.html").write_text("desktop studio", encoding="utf-8") + (assets / "app.js").write_text("console.log('ok');", encoding="utf-8") + + media_root = tmp_path / "media" + media_root.mkdir(parents=True, exist_ok=True) + monkeypatch.setenv("DATABASE_URL", f"sqlite:///{(tmp_path / 'api.db').as_posix()}") + monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) + 
monkeypatch.setenv("REFRAME_DESKTOP_WEB_DIST", str(web_dist)) + + _reset_settings_caches() + + from app.main import create_app + + app = create_app() + with TestClient(app) as client: + root = client.get("/") + assert root.status_code == 200 + assert "desktop studio" in root.text + + js = client.get("/assets/app.js") + assert js.status_code == 200 + assert "console.log" in js.text + + spa = client.get("/projects/123") + assert spa.status_code == 200 + assert "desktop studio" in spa.text + + traversal = client.get("/%2e%2e/%2e%2e/secret.txt") + assert traversal.status_code == 404 + + reserved = client.get("/api/_desktop_shell_test") + assert reserved.status_code == 404 + + +def test_desktop_embedded_web_mount_skips_when_dist_missing(monkeypatch, tmp_path: Path): + media_root = tmp_path / "media" + media_root.mkdir(parents=True, exist_ok=True) + monkeypatch.setenv("DATABASE_URL", f"sqlite:///{(tmp_path / 'api.db').as_posix()}") + monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) + monkeypatch.setenv("REFRAME_DESKTOP_WEB_DIST", str(tmp_path / "does-not-exist")) + + _reset_settings_caches() + + from app.main import create_app + + app = create_app() + with TestClient(app) as client: + assert client.get("/").status_code == 404 + assert client.get("/health").status_code == 200 diff --git a/apps/api/tests/test_local_queue_mode.py b/apps/api/tests/test_local_queue_mode.py new file mode 100644 index 00000000..620a2538 --- /dev/null +++ b/apps/api/tests/test_local_queue_mode.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +from uuid import uuid4 + + +def test_enqueue_job_uses_local_queue_when_enabled(monkeypatch): + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + + import app.api as api_module + + monkeypatch.setattr(api_module, "dispatch_local_task", lambda task_name, *args, queue=None: "local-123") + + class _Job: + id = uuid4() + + task_id = api_module.enqueue_job(_Job(), "tasks.generate_captions", "job-id", "asset-id", {"backend": "noop"}) + 
assert task_id == "local-123" + + +def test_system_status_prefers_local_queue_diagnostics(monkeypatch): + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + + import app.api as api_module + + monkeypatch.setattr( + api_module, + "local_queue_diagnostics", + lambda: { + "ping_ok": True, + "workers": ["local-queue", "pending:2"], + "system_info": {"ffmpeg": {"present": True, "version": "6.1"}}, + "error": None, + }, + ) + + status = api_module.system_status() + assert status.worker.ping_ok is True + assert status.worker.workers == ["local-queue", "pending:2"] + assert status.worker.system_info == {"ffmpeg": {"present": True, "version": "6.1"}} + + +def test_publish_dispatch_uses_local_queue_when_enabled(monkeypatch): + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + + import app.publish_api as publish_api + + monkeypatch.setattr(publish_api, "dispatch_local_task", lambda task_name, *args: "local-publish") + + class _Job: + id = uuid4() + + task_id = publish_api._dispatch_publish_task(_Job()) + assert task_id == "local-publish" diff --git a/apps/api/tests/test_scripts_coverage_truth_wave.py b/apps/api/tests/test_scripts_coverage_truth_wave.py new file mode 100644 index 00000000..5bd0b7e5 --- /dev/null +++ b/apps/api/tests/test_scripts_coverage_truth_wave.py @@ -0,0 +1,282 @@ +from __future__ import annotations + +import json +import os +import sys +from datetime import datetime, timezone +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + +import pytest + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _repo_root() -> Path: + return Path(__file__).resolve().parents[3] + + +def _load_script(path: Path, module_name: str): + spec = spec_from_file_location(module_name, path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module at {path}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + 
spec.loader.exec_module(module)
+    return module
+
+
+def test_generate_ops_digest_helpers_cover_edge_cases(tmp_path):
+    module = _load_script(_repo_root() / "scripts" / "generate_ops_digest.py", "generate_ops_digest_cov_wave")
+
+    # Date parsing and windows
+    now = datetime(2026, 3, 4, tzinfo=timezone.utc)
+    start = datetime(2026, 3, 1, tzinfo=timezone.utc)
+    end = datetime(2026, 3, 8, tzinfo=timezone.utc)
+    _expect(module._parse_dt(None) is None, "Expected None datetime for missing value")
+    _expect(module._parse_dt("bad-date") is None, "Expected None datetime for invalid value")
+    _expect(module._in_window(now, start, end), "Expected datetime in window")
+    _expect(not module._in_window(None, start, end), "Expected None datetime outside window")
+
+    # Link header parsing
+    link = '<https://api.example.test/page=2>; rel="next", <https://api.example.test/page=5>; rel="last"'
+    _expect(module._next_link(link) == "https://api.example.test/page=2", "Expected next link parsing")
+    _expect(module._next_link(None) is None, "Expected None next link for missing header")
+
+    # Failure-rate and percentile helpers
+    _expect(module._failure_rate(0, 0) == 0.0, "Expected 0 failure rate for no runs")
+    _expect(module._failure_rate(1, 4) == 25.0, "Expected ratio failure rate")
+    _expect(module._percentile([], 0.95) == 0.0, "Expected empty percentile fallback")
+    _expect(module._percentile([10, 20, 30], 0) == 10.0, "Expected p0 percentile")
+    _expect(module._percentile([10, 20, 30], 1) == 30.0, "Expected p1 percentile")
+
+    # Duration helper
+    run_ok = {
+        "created_at": "2026-03-02T10:00:00Z",
+        "run_started_at": "2026-03-02T10:00:00Z",
+        "updated_at": "2026-03-02T10:05:00Z",
+    }
+    _expect(module._run_duration_seconds(run_ok) == 300.0, "Expected run duration computation")
+    run_bad = {
+        "run_started_at": "2026-03-02T10:05:00Z",
+        "updated_at": "2026-03-02T10:00:00Z",
+    }
+    _expect(module._run_duration_seconds(run_bad) is None, "Expected invalid backwards duration to be None")
+
+    # Required-check extraction
+    workflow_runs = [
+        {"head_branch": 
"main", "name": "CI"}, + {"head_branch": "main", "name": "CodeQL"}, + {"head_branch": "main", "name": "CI"}, + ] + explicit_policy = {"required_checks": ["CI", "CI", "", "CodeQL"]} + _expect(module._required_checks(explicit_policy, workflow_runs) == ["CI", "CodeQL"], "Expected deduped explicit checks") + _expect(module._required_checks({}, workflow_runs) == ["CI", "CodeQL"], "Expected discovered checks fallback") + + pass_rate, top_failed = module._required_check_metrics( + [ + {"name": "CI", "conclusion": "success"}, + {"name": "CI", "conclusion": "failure"}, + {"name": "CodeQL", "conclusion": "neutral"}, + {"name": "CodeQL", "conclusion": "cancelled"}, + ], + ["CI", "CodeQL"], + ) + _expect(pass_rate == 25.0, "Expected required-check pass-rate computation") + _expect(top_failed and top_failed[0]["name"] in {"CI", "CodeQL"}, "Expected top failed checks list") + + # Deep merge and policy load paths + base = {"a": {"x": 1}, "b": 2} + merged = module._deep_merge(base, {"a": {"y": 3}, "c": 4}) + _expect(merged == {"a": {"x": 1, "y": 3}, "b": 2, "c": 4}, "Expected deep merge semantics") + + policy_path = tmp_path / "ops-policy.json" + policy_path.write_text(json.dumps({"required_checks": ["CI"], "thresholds": {"main_ci_failure_rate_pct": {"ok_max": 1.0}}}), encoding="utf-8") + loaded_policy, loaded = module._load_policy(policy_path) + _expect(loaded is True, "Expected policy loaded flag") + _expect(loaded_policy["required_checks"] == ["CI"], "Expected loaded required checks") + + # Safe path helper + root = tmp_path / "workspace" + root.mkdir(parents=True, exist_ok=True) + safe = module._safe_workspace_path("docs/out.json", base=root) + _expect(safe == root / "docs" / "out.json", "Expected relative output path under workspace") + with pytest.raises(ValueError): + module._safe_workspace_path("../escape.json", base=root) + + +def test_generate_ops_digest_main_paths(monkeypatch, tmp_path): + module = _load_script(_repo_root() / "scripts" / "generate_ops_digest.py", 
"generate_ops_digest_main_cov_wave") + + repo = tmp_path / "repo" + repo.mkdir(parents=True, exist_ok=True) + (repo / "docs").mkdir(parents=True, exist_ok=True) + + out_json = repo / "tmp" / "digest.json" + out_md = repo / "tmp" / "digest.md" + policy = repo / "docs" / "ops-health-policy.json" + policy.write_text(json.dumps({"required_checks": ["CI"]}), encoding="utf-8") + + # Missing token path + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + monkeypatch.delenv("GH_TOKEN", raising=False) + def _parse_args_missing_token(): + return type( + "Args", + (), + { + "repo": "Prekzursil/Reframe", + "window_days": 7, + "out_json": str(out_json.relative_to(repo)), + "out_md": str(out_md.relative_to(repo)), + "policy": str(policy.relative_to(repo)), + "api_base": "https://api.github.com", + }, + )() + + monkeypatch.setattr(module, "parse_args", _parse_args_missing_token) + + prev = Path.cwd() + os.chdir(repo) + try: + with pytest.raises(SystemExit): + module.main() + finally: + os.chdir(prev) + + # Successful run path with fake pagination + monkeypatch.setenv("GITHUB_TOKEN", "token") + + pulls = [{"created_at": "2026-03-03T00:00:00Z", "merged_at": "2026-03-03T00:00:00Z"}] + issues = [{"labels": [{"name": "agent:ready"}]}] + runs = [ + { + "head_branch": "main", + "name": "CI", + "created_at": "2026-03-03T01:00:00Z", + "run_started_at": "2026-03-03T01:00:00Z", + "updated_at": "2026-03-03T01:10:00Z", + "conclusion": "success", + } + ] + seq = [pulls, issues, {"workflow_runs": runs}] + monkeypatch.setattr(module, "_request_json", lambda _url, _token: (seq.pop(0), None)) + + prev = Path.cwd() + os.chdir(repo) + try: + rc = module.main() + finally: + os.chdir(prev) + + _expect(rc == 0, "Expected digest main success") + payload = json.loads(out_json.read_text(encoding="utf-8")) + _expect(payload["metrics"]["main_ci_failed_runs"] == 0, "Expected successful CI metrics") + _expect("Weekly Ops Digest" in out_md.read_text(encoding="utf-8"), "Expected markdown output") + + +def 
test_assert_coverage_inventory_and_cli_paths(tmp_path, monkeypatch, capsys): + module = _load_script(_repo_root() / "scripts" / "quality" / "assert_coverage_100.py", "assert_coverage_cov_wave") + + root = tmp_path / "repo" + root.mkdir(parents=True, exist_ok=True) + + # Build tracked inventory files. + api_file = root / "apps" / "api" / "app" / "core.py" + api_file.parent.mkdir(parents=True, exist_ok=True) + api_file.write_text("VALUE = 1\n", encoding="utf-8") + + web_file = root / "apps" / "web" / "src" / "ui.ts" + web_file.parent.mkdir(parents=True, exist_ok=True) + web_file.write_text("export const VALUE = 1;\n", encoding="utf-8") + + rust_file = root / "apps" / "desktop" / "src-tauri" / "src" / "core.rs" + rust_file.parent.mkdir(parents=True, exist_ok=True) + rust_file.write_text("pub fn f() {}\n", encoding="utf-8") + + monkeypatch.setattr( + module, + "_load_git_tracked_files", + lambda _root: [ + "apps/api/app/core.py", + "apps/web/src/ui.ts", + "apps/desktop/src-tauri/src/core.rs", + ], + ) + + expected = module._collect_expected_inventory(root) + _expect("apps/api/app/core.py" in expected, "Expected API file in inventory") + _expect("apps/web/src/ui.ts" in expected, "Expected web file in inventory") + _expect("apps/desktop/src-tauri/src/core.rs" in expected, "Expected rust file in inventory") + + # Provide LCOV with one uncovered line to verify findings formatting. 
+ lcov = root / "coverage" / "lcov.info" + lcov.parent.mkdir(parents=True, exist_ok=True) + lcov.write_text( + "\n".join( + [ + "TN:", + f"SF:{web_file.as_posix()}", + "DA:1,1", + "DA:2,0", + "end_of_record", + ] + ), + encoding="utf-8", + ) + + stats = module.parse_lcov("web", lcov, base=root) + status, findings, metrics = module.evaluate([stats], expected_inventory=expected) + _expect(status == "fail", "Expected fail status for uncovered inventory") + _expect(metrics["uncovered_files"] >= 1, "Expected uncovered file metric") + _expect(any("coverage inventory" in item for item in findings), "Expected inventory findings") + + # Cover CLI success path with --no-inventory-check. + json_out = root / "out" / "coverage.json" + md_out = root / "out" / "coverage.md" + rc = module.main.__wrapped__ if hasattr(module.main, "__wrapped__") else None + _expect(rc is None, "No wrapper expected") + + def _parse_args_no_inventory(): + return type( + "Args", + (), + { + "xml": [], + "lcov": [f"web={lcov}"], + "out_json": str(json_out), + "out_md": str(md_out), + "inventory_root": str(root), + "no_inventory_check": True, + }, + )() + + monkeypatch.setattr(module, "_parse_args", _parse_args_no_inventory) + exit_code = module.main() + _expect(exit_code == 1, "Expected fail exit code when coverage is below 100") + _expect(json_out.is_file(), "Expected JSON artifact output") + _expect(md_out.is_file(), "Expected markdown artifact output") + + text = capsys.readouterr().out + _expect("Coverage 100 Gate" in text, "Expected CLI markdown output") + + +def test_assert_coverage_path_helpers_and_named_path_parsing(tmp_path): + module = _load_script(_repo_root() / "scripts" / "quality" / "assert_coverage_100.py", "assert_cov_helpers_wave") + + with pytest.raises(ValueError): + module.parse_named_path("invalid") + + name, path = module.parse_named_path("web=coverage/lcov.info") + _expect(name == "web", "Expected parsed name") + _expect(path.as_posix() == "coverage/lcov.info", "Expected parsed 
path") + + root = tmp_path / "workspace" + root.mkdir(parents=True, exist_ok=True) + safe = module._safe_output_path("coverage/out.json", "fallback.json", base=root) + _expect(safe == root / "coverage" / "out.json", "Expected safe path in workspace") + + with pytest.raises(ValueError): + module._safe_output_path("../escape.json", "fallback.json", base=root) \ No newline at end of file diff --git a/apps/api/tests/test_scripts_coverage_wave.py b/apps/api/tests/test_scripts_coverage_wave.py new file mode 100644 index 00000000..b3fa7930 --- /dev/null +++ b/apps/api/tests/test_scripts_coverage_wave.py @@ -0,0 +1,274 @@ +from __future__ import annotations + +import argparse +import json +import sys +import types +from dataclasses import dataclass +from enum import Enum +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + +import pytest + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _repo_root() -> Path: + return Path(__file__).resolve().parents[3] + + +def _load_script(name: str): + scripts_dir = _repo_root() / "scripts" + if str(scripts_dir) not in sys.path: + sys.path.insert(0, str(scripts_dir)) + module_path = scripts_dir / f"{name}.py" + spec = spec_from_file_location(name, module_path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module spec for {name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def test_release_readiness_helpers_and_safe_paths(tmp_path): + module = _load_script("release_readiness_report") + + root = tmp_path / "repo" + root.mkdir(parents=True, exist_ok=True) + + safe = module._safe_output_path("docs/out.json", root / "fallback.json", root=root) + _expect(safe == (root / "docs" / "out.json"), "Expected relative path to resolve under repo root") + + with pytest.raises(ValueError): + module._safe_output_path("../escape.json", root 
/ "fallback.json", root=root) + + rel = module._display_path(root / "docs" / "out.json", root) + _expect(rel.replace("\\", "/") == "docs/out.json", "Expected display path relative to repo root") + + +def test_release_readiness_resolve_status_matrix(): + module = _load_script("release_readiness_report") + + status, blocking, external = module._resolve_status(local_ok=True, updater_ok=True, pyannote_cpu_status="ok") + _expect(status == "READY", "Expected READY when all gates are green") + _expect(blocking == [], "Expected no blocking reasons") + _expect(external == [], "Expected no external blockers") + + status, blocking, external = module._resolve_status( + local_ok=True, + updater_ok=True, + pyannote_cpu_status="blocked_external", + ) + _expect(status == "READY_WITH_EXTERNAL_BLOCKER", "Expected external-blocker readiness status") + _expect(blocking == [], "Expected no blocking reasons for external-only blocker") + _expect(len(external) == 1, "Expected one external blocker detail") + + status, blocking, _external = module._resolve_status(local_ok=False, updater_ok=False, pyannote_cpu_status="failed") + _expect(status == "NOT_READY", "Expected NOT_READY for failed local/updater/pyannote") + _expect(len(blocking) == 3, "Expected three blocking reasons") + + +def test_release_readiness_main_ready_with_external_blocker(monkeypatch): + module = _load_script("release_readiness_report") + + stamp = "2099-01-01" + out_md = "tmp/release-readiness-wave/report.md" + out_json = "tmp/release-readiness-wave/report.json" + + def fake_load_json(path: Path): + text = str(path).replace("\\", "/") + if text.endswith(f"{stamp}-updater-e2e-windows.json"): + return {"success": True, "platform": "windows"} + if text.endswith(f"{stamp}-updater-e2e-macos.json"): + return {"success": True, "platform": "macos"} + if text.endswith(f"{stamp}-updater-e2e-linux.json"): + return {"success": True, "platform": "linux"} + if text.endswith(f"{stamp}-pyannote-benchmark-status.json"): + return {"cpu": 
{"status": "blocked_external"}, "gpu": {"status": "unknown"}} + return None + + monkeypatch.setattr(module, "_load_json", fake_load_json) + monkeypatch.setattr(module, "_load_latest_updater_result", lambda _plans, _platform: (None, None)) + monkeypatch.setattr(module, "_collect_gh_status", lambda _repo: {"ci": {"conclusion": "success"}, "codeql": {"conclusion": "success"}, "branch_protection": {"required_reviews": 1, "linear_history": True}}) + + rc = module.main( + [ + "--stamp", + stamp, + "--verify-exit", + "0", + "--smoke-hosted-exit", + "0", + "--smoke-local-exit", + "0", + "--smoke-security-exit", + "0", + "--smoke-workflows-exit", + "0", + "--smoke-perf-cost-exit", + "0", + "--diarization-exit", + "0", + "--out-md", + out_md, + "--out-json", + out_json, + ] + ) + + _expect(rc == 0, "Expected READY_WITH_EXTERNAL_BLOCKER to be non-failing") + repo = _repo_root() + payload = json.loads((repo / out_json).read_text(encoding="utf-8")) + _expect(payload["status"] == "READY_WITH_EXTERNAL_BLOCKER", "Expected external blocker status in summary") + _expect(payload.get("external_blocker_tracking", {}).get("issue_url"), "Expected external blocker tracking metadata") + + +def test_upsert_ops_digest_main_create_and_update(monkeypatch, tmp_path): + module = _load_script("upsert_ops_digest_issue") + + repo = _repo_root() + digest_json = repo / "tmp" / "ops-digest" / "digest.json" + digest_md = repo / "tmp" / "ops-digest" / "digest.md" + out_json = repo / "tmp" / "ops-digest" / "out.json" + digest_json.parent.mkdir(parents=True, exist_ok=True) + digest_json.write_text(json.dumps({"metrics": {}, "trends": {}, "health": {}}), encoding="utf-8") + digest_md.write_text("# digest\n", encoding="utf-8") + + monkeypatch.setenv("GITHUB_TOKEN", "token") + + args = argparse.Namespace( + repo="Prekzursil/Reframe", + digest_json=str(digest_json.relative_to(repo)), + digest_md=str(digest_md.relative_to(repo)), + out_json=str(out_json.relative_to(repo)), + title="Weekly Ops Digest 
(rolling)", + ) + monkeypatch.setattr(module, "parse_args", lambda: args) + + calls = {"mode": "create", "posts": 0, "patches": 0} + + def fake_request(path: str, token: str, method: str = "GET", body=None): + _ = token + if method == "GET" and path.startswith("/repos/Prekzursil/Reframe/issues?"): + return [] if calls["mode"] == "create" else [{"number": 88, "title": "Weekly Ops Digest (rolling)", "html_url": "https://example.test/88"}] + if method == "POST": + calls["posts"] += 1 + return {"number": 88, "html_url": "https://example.test/88"} + if method == "PATCH": + calls["patches"] += 1 + return {"number": 88, "html_url": "https://example.test/88"} + raise AssertionError(f"Unexpected request: {method} {path} body={body!r}") + + monkeypatch.setattr(module, "_request_json", fake_request) + + rc_create = module.main() + _expect(rc_create == 0, "Expected create flow to succeed") + _expect(calls["posts"] == 1, "Expected one POST for create flow") + + calls["mode"] = "update" + rc_update = module.main() + _expect(rc_update == 0, "Expected update flow to succeed") + _expect(calls["patches"] == 1, "Expected one PATCH for update flow") + + +def test_benchmark_diarization_extract_and_main_paths(monkeypatch, tmp_path, capsys): + module = _load_script("benchmark_diarization") + + with pytest.raises(FileNotFoundError): + monkeypatch.setattr(module.shutil, "which", lambda _name: None) + module._extract_wav_16k_mono(tmp_path / "in.wav", tmp_path / "out.wav") + + recorded = {} + monkeypatch.setattr(module.shutil, "which", lambda _name: "ffmpeg") + monkeypatch.setattr(module.subprocess, "run", lambda cmd, check, capture_output, shell: recorded.setdefault("cmd", cmd)) + module._extract_wav_16k_mono(tmp_path / "in.wav", tmp_path / "out.wav") + _expect(recorded["cmd"][0] == "ffmpeg", "Expected ffmpeg command execution") + + fake_path_guard = types.ModuleType("media_core.transcribe.path_guard") + def _validate_media_input_path(value): + return Path(value) + + 
fake_path_guard.validate_media_input_path = _validate_media_input_path + + class _Backend(Enum): + PYANNOTE = "pyannote" + SPEECHBRAIN = "speechbrain" + + @dataclass + class _Config: + backend: _Backend + model: str + huggingface_token: str | None + min_segment_duration: float + + fake_diarize = types.ModuleType("media_core.diarize") + fake_diarize.DiarizationBackend = _Backend + fake_diarize.DiarizationConfig = _Config + fake_diarize.diarize_audio = lambda _wav, _cfg: ["s1", "s2"] + + monkeypatch.setitem(sys.modules, "media_core.transcribe.path_guard", fake_path_guard) + monkeypatch.setitem(sys.modules, "media_core.diarize", fake_diarize) + + input_file = tmp_path / "sample.wav" + input_file.write_bytes(b"wav") + monkeypatch.setattr(module, "_extract_wav_16k_mono", lambda _inp, out: out.write_bytes(b"wav16")) + monkeypatch.setattr(module, "_get_peak_rss_mb", lambda: 123.4) + + rc_blocked = module.main([ + str(input_file), + "--backend", + "pyannote", + "--model", + "pyannote/speaker-diarization-3.1", + ]) + _expect(rc_blocked == 2, "Expected missing HF token path to return 2") + + rc_ok = module.main([ + str(input_file), + "--backend", + "speechbrain", + "--runs", + "2", + "--format", + "md", + ]) + _expect(rc_ok == 0, "Expected benchmark main success for speechbrain backend") + _expect("Diarization benchmark" in capsys.readouterr().out, "Expected markdown output") + + +def test_transcribe_main_module_paths(monkeypatch, tmp_path, capsys): + module_path = _repo_root() / "packages" / "media-core" / "src" / "media_core" / "transcribe" / "__main__.py" + spec = spec_from_file_location("media_core.transcribe.__main__", module_path) + _expect(spec is not None and spec.loader is not None, "Expected __main__ module spec") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + + media_file = tmp_path / "audio.wav" + media_file.write_bytes(b"audio") + + monkeypatch.setattr(module, "parse_args", lambda: 
argparse.Namespace(input=str(media_file), language="en", backend="noop", model="whisper-1", device="cpu")) + monkeypatch.setattr(module, "validate_media_input_path", lambda _p: media_file) + + class _Result: + def model_dump(self): + return {"text": "ok", "words": []} + + monkeypatch.setattr(module, "transcribe_noop", lambda _path, _cfg: _Result()) + _expect(module.main() == 0, "Expected noop backend path to pass") + + monkeypatch.setattr(module, "parse_args", lambda: argparse.Namespace(input=str(media_file), language=None, backend="invalid", model="m", device=None)) + _expect(module.main() == 1, "Expected invalid backend to fail") + + monkeypatch.setattr(module, "parse_args", lambda: argparse.Namespace(input=str(media_file), language="en", backend="noop", model="m", device=None)) + monkeypatch.setattr(module, "validate_media_input_path", lambda _p: (_ for _ in ()).throw(ValueError("bad path"))) + _expect(module.main() == 1, "Expected invalid input path to fail") + + monkeypatch.setattr(module, "validate_media_input_path", lambda _p: media_file) + monkeypatch.setattr(module, "transcribe_noop", lambda _path, _cfg: (_ for _ in ()).throw(RuntimeError("boom"))) + _expect(module.main() == 1, "Expected transcription exception path to fail") + _expect("Tip: use backend 'noop'" in capsys.readouterr().err, "Expected offline tip on transcription failure") diff --git a/apps/api/tests/test_scripts_misc_tooling.py b/apps/api/tests/test_scripts_misc_tooling.py new file mode 100644 index 00000000..dda7a87b --- /dev/null +++ b/apps/api/tests/test_scripts_misc_tooling.py @@ -0,0 +1,255 @@ +from __future__ import annotations + +import argparse +import json +import sys +import types +from dataclasses import dataclass +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _repo_root() -> Path: + return 
Path(__file__).resolve().parents[3] + + +def _load_script(name: str): + scripts_dir = _repo_root() / "scripts" + if str(scripts_dir) not in sys.path: + sys.path.insert(0, str(scripts_dir)) + module_path = scripts_dir / f"{name}.py" + spec = spec_from_file_location(name, module_path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module spec for {name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def test_prefetch_whisper_model_missing_dependency(capsys): + module = _load_script("prefetch_whisper_model") + + rc = module.main(["--model", "large-v3"]) + + _expect(rc == 2, "Expected missing faster-whisper dependency to return 2") + _expect("faster-whisper is not installed" in capsys.readouterr().err, "Expected dependency error message") + + +def test_prefetch_whisper_model_success(monkeypatch, capsys): + module = _load_script("prefetch_whisper_model") + + calls: list[tuple[str, dict[str, str]]] = [] + + class FakeWhisperModel: + def __init__(self, model_name: str, **kwargs): + calls.append((model_name, kwargs)) + + fake_backend = types.ModuleType("media_core.transcribe.backends.faster_whisper") + fake_backend._normalize_model_name = lambda value: f"normalized-{value}" + + fake_fw = types.ModuleType("faster_whisper") + fake_fw.WhisperModel = FakeWhisperModel + + monkeypatch.setitem(sys.modules, "media_core.transcribe.backends.faster_whisper", fake_backend) + monkeypatch.setitem(sys.modules, "faster_whisper", fake_fw) + + rc = module.main(["--model", "large-v3", "--device", "cpu"]) + + _expect(rc == 0, "Expected successful prefetch") + _expect(calls == [("normalized-large-v3", {"device": "cpu"})], "Expected normalized model and device kwargs") + _expect("Prefetching faster-whisper model" in capsys.readouterr().out, "Expected prefetch output") + + +def test_install_argos_pack_paths(monkeypatch, capsys): + module = _load_script("install_argos_pack") + + class 
FakePackage: + def __init__(self, src: str, tgt: str): + self.from_code = src + self.to_code = tgt + + def download(self): + return "/tmp/fake.argosmodel" + + class FakeArgos: + def __init__(self): + self.updated = False + self.installed_path = "" + + def update_package_index(self): + self.updated = True + + def get_available_packages(self): + return [FakePackage("en", "es"), FakePackage("en", "fr")] + + def install_from_path(self, path: str): + self.installed_path = path + + fake_argos = FakeArgos() + monkeypatch.setattr(module, "_ensure_argos", lambda: fake_argos) + + _expect(module.main(["--list"]) == 0, "Expected list flow to pass") + _expect("en->es" in capsys.readouterr().out, "Expected list output to include en->es") + + _expect(module.main([]) == 2, "Expected missing src/tgt to fail") + _expect("--src and --tgt are required" in capsys.readouterr().err, "Expected src/tgt requirement message") + + _expect(module.main(["--src", "en", "--tgt", "de"]) == 3, "Expected unavailable pair to fail") + _expect("No Argos pack found for en->de" in capsys.readouterr().err, "Expected no-pack message") + + _expect(module.main(["--src", "en", "--tgt", "es"]) == 0, "Expected install flow to pass") + _expect(fake_argos.installed_path == "/tmp/fake.argosmodel", "Expected install_from_path invocation") + + +def test_generate_benchmark_sample_main_and_path_guard(monkeypatch, tmp_path): + module = _load_script("generate_benchmark_sample") + + _expect(module._sample_value(2.2) == 0.0, "Expected silent bucket sample to be zero") + + repo = tmp_path / "repo" + repo.mkdir(parents=True, exist_ok=True) + try: + module._safe_output_path("../escape.wav", base=repo) + raise AssertionError("Expected ValueError for escaping output path") + except ValueError: + pass + + out_wav = tmp_path / "sample.wav" + args = argparse.Namespace(out="samples/sample.wav", duration=0.02, sample_rate=8000) + monkeypatch.setattr(module.argparse.ArgumentParser, "parse_args", lambda _self: args) + 
monkeypatch.setattr(module, "_safe_output_path", lambda *_args, **_kwargs: out_wav) + + rc = module.main() + + _expect(rc == 0, "Expected benchmark sample generation to succeed") + _expect(out_wav.is_file(), "Expected WAV output file to exist") + _expect(out_wav.stat().st_size > 44, "Expected WAV file with audio payload") + + +def test_download_whispercpp_model_behaviors(monkeypatch, tmp_path, capsys): + module = _load_script("download_whispercpp_model") + + _expect(module._normalize_filename("large-v3") == "ggml-large-v3.bin", "Expected normalized ggml filename") + _expect(module._normalize_filename("ggml-base.en.bin") == "ggml-base.en.bin", "Expected pre-prefixed filename") + + try: + module._normalize_filename("bad*name") + raise AssertionError("Expected invalid filename to fail") + except ValueError: + pass + + out_dir = tmp_path / "models" + out_dir.mkdir(parents=True, exist_ok=True) + existing = out_dir / "ggml-large-v3.bin" + existing.write_text("ready", encoding="utf-8") + + monkeypatch.setattr(module, "_resolve_output_dir", lambda *_args, **_kwargs: out_dir) + rc_existing = module.main(["--model", "large-v3"]) + _expect(rc_existing == 0, "Expected existing file fast-path") + _expect("Already present" in capsys.readouterr().out, "Expected already-present message") + + rc_bad_url = module.main(["--base-url", "http://example.com"]) + _expect(rc_bad_url == 2, "Expected non-https base URL to fail") + + downloaded = out_dir / "ggml-small.bin" + monkeypatch.setattr(module, "_download", lambda _url, dest: dest.write_text("model", encoding="utf-8")) + rc_download = module.main(["--model", "small", "--force"]) + _expect(rc_download == 0, "Expected download path to succeed") + _expect(downloaded.is_file(), "Expected downloaded model file") + + +def test_verify_desktop_updater_release_main_paths(monkeypatch, capsys): + module = _load_script("verify_desktop_updater_release") + + payload = { + "version": "0.1.8", + "pub_date": "2026-03-03T00:00:00Z", + "platforms": { + 
"windows-x86_64": { + "url": "https://example.com/app.exe", + "signature": "A" * 40, + } + }, + } + monkeypatch.setattr(module, "_fetch_bytes", lambda _url: json.dumps(payload).encode("utf-8")) + monkeypatch.setattr(module, "_head_with_retries", lambda _url: 200) + + rc_ok = module.main(["--endpoint", "https://example.com/latest.json"]) + _expect(rc_ok == 0, "Expected updater release verification to pass") + _expect("OK: updater JSON looks valid" in capsys.readouterr().out, "Expected success output") + + monkeypatch.setattr(module, "_head_with_retries", lambda _url: 404) + rc_fail = module.main(["--endpoint", "https://example.com/latest.json"]) + _expect(rc_fail == 1, "Expected inaccessible platform URL to fail") + + +def test_verify_hf_model_access_paths(monkeypatch, tmp_path): + module = _load_script("verify_hf_model_access") + + dotenv_repo = tmp_path / "repo" + dotenv_repo.mkdir(parents=True, exist_ok=True) + (dotenv_repo / ".env").write_text("HF_TOKEN=token-from-env-file\n", encoding="utf-8") + + token = module._load_token("", dotenv_repo) + _expect(token == "token-from-env-file", "Expected token lookup from .env") + + missing = module._probe("https://huggingface.co/x/resolve/main/config.yaml", "", model="x") + _expect(missing.status == "missing_token", "Expected missing-token probe state") + + @dataclass + class _FakeResult: + timestamp_utc: str + status: str + model: str + url: str + http_status: int | None + error: str | None + + monkeypatch.setattr(module, "_probe", lambda _url, _token, model: _FakeResult("ts", "ok", model, _url, 200, None)) + rc_ok = module.main(["--token", "abc", "--model", "pyannote/speaker-diarization-3.1"]) + _expect(rc_ok == 0, "Expected hf probe main success") + + monkeypatch.setattr(module, "_probe", lambda _url, _token, model: _FakeResult("ts", "blocked_403", model, _url, 403, "blocked")) + rc_blocked = module.main(["--token", "abc", "--model", "pyannote/speaker-diarization-3.1"]) + _expect(rc_blocked == 4, "Expected blocked 
status exit code") + + +def test_desktop_updater_e2e_paths(monkeypatch, tmp_path): + module = _load_script("desktop_updater_e2e") + + repo = tmp_path / "repo" + (repo / "scripts").mkdir(parents=True, exist_ok=True) + monkeypatch.setattr(module, "_repo_root", lambda: repo) + + verify_failure = module.subprocess.CompletedProcess(args=["verify"], returncode=1, stdout="", stderr="err") + + def run_fail(cmd, *, cwd, env=None): + _ = (cmd, cwd, env) + return verify_failure + + monkeypatch.setattr(module, "_run", run_fail) + rc_fail = module.main(["--platform", "linux"]) + _expect(rc_fail == 1, "Expected verify failure to fail wrapper") + + verify_ok = module.subprocess.CompletedProcess(args=["verify"], returncode=0, stdout="ok", stderr="") + helper_ok = module.subprocess.CompletedProcess( + args=["helper"], + returncode=0, + stdout=json.dumps({"success": True, "observed_old_version": "0.1.6", "observed_new_version": "0.1.7"}), + stderr="", + ) + calls = {"count": 0} + + def run_success(cmd, *, cwd, env=None): + _ = (cmd, cwd, env) + calls["count"] += 1 + return verify_ok if calls["count"] == 1 else helper_ok + + monkeypatch.setattr(module, "_run", run_success) + rc_ok = module.main(["--platform", "linux"]) + _expect(rc_ok == 0, "Expected successful updater e2e wrapper") diff --git a/apps/api/tests/test_scripts_quality_cli_wave2.py b/apps/api/tests/test_scripts_quality_cli_wave2.py new file mode 100644 index 00000000..2668a337 --- /dev/null +++ b/apps/api/tests/test_scripts_quality_cli_wave2.py @@ -0,0 +1,360 @@ +from __future__ import annotations + +import argparse +import json +import sys +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + +import pytest + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _repo_root() -> Path: + return Path(__file__).resolve().parents[3] + + +def _load_quality(name: str): + script_path = _repo_root() / "scripts" / "quality" / 
f"{name}.py" + spec = spec_from_file_location(f"quality_{name}_wave2", script_path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module spec for {name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def _load_script(name: str): + script_path = _repo_root() / "scripts" / f"{name}.py" + spec = spec_from_file_location(f"script_{name}_wave2", script_path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module spec for {name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def test_check_quality_secrets_main_pass_and_fail(monkeypatch): + module = _load_quality("check_quality_secrets") + repo = _repo_root() + + out_json_rel = "tmp/quality-wave2/check-quality-secrets.json" + out_md_rel = "tmp/quality-wave2/check-quality-secrets.md" + out_json = repo / out_json_rel + out_md = repo / out_md_rel + + monkeypatch.setattr( + module, + "_parse_args", + lambda: argparse.Namespace(required_secret=[], required_var=[], out_json=out_json_rel, out_md=out_md_rel), + ) + + for name in module.DEFAULT_REQUIRED_SECRETS: + monkeypatch.setenv(name, "x") + for name in module.DEFAULT_REQUIRED_VARS: + monkeypatch.setenv(name, "x") + + rc = module.main() + _expect(rc == 0, "Expected pass when all secrets/vars are set") + _expect(out_json.is_file(), "Expected JSON output file") + _expect(out_md.is_file(), "Expected markdown output file") + + monkeypatch.delenv(module.DEFAULT_REQUIRED_SECRETS[0], raising=False) + rc_fail = module.main() + _expect(rc_fail == 1, "Expected fail when a required secret is missing") + + +def test_check_quality_secrets_safe_output_path_escape(): + module = _load_quality("check_quality_secrets") + with pytest.raises(ValueError): + module._safe_output_path("../escape.json", "fallback.json", base=Path.cwd()) + + +def 
test_check_required_checks_main_success_and_missing_token(monkeypatch): + module = _load_quality("check_required_checks") + repo = _repo_root() + + out_json_rel = "tmp/quality-wave2/required-checks.json" + out_md_rel = "tmp/quality-wave2/required-checks.md" + out_json = repo / out_json_rel + out_md = repo / out_md_rel + + calls = {"count": 0} + + def fake_api_get(_repo: str, path: str, _token: str): + calls["count"] += 1 + if "check-runs" in path: + if calls["count"] <= 2: + return {"check_runs": [{"name": "Coverage 100 Gate", "status": "in_progress", "conclusion": None}]} + return {"check_runs": [{"name": "Coverage 100 Gate", "status": "completed", "conclusion": "success"}]} + return {"statuses": []} + + monkeypatch.setattr(module, "_api_get", fake_api_get) + monkeypatch.setattr(module.time, "sleep", lambda _s: None) + monkeypatch.setenv("GITHUB_TOKEN", "token") + monkeypatch.setattr( + module, + "_parse_args", + lambda: argparse.Namespace( + repo="Prekzursil/Reframe", + sha="abc123", + required_context=["Coverage 100 Gate"], + timeout_seconds=5, + poll_seconds=1, + out_json=out_json_rel, + out_md=out_md_rel, + ), + ) + + rc = module.main() + _expect(rc == 0, "Expected success after in-progress then successful check run") + _expect(out_json.is_file(), "Expected required-check JSON artifact") + _expect(out_md.is_file(), "Expected required-check markdown artifact") + + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + monkeypatch.delenv("GH_TOKEN", raising=False) + with pytest.raises(SystemExit): + module.main() + + +def test_check_codacy_zero_main_paths(monkeypatch): + module = _load_quality("check_codacy_zero") + repo = _repo_root() + + out_json_rel = "tmp/quality-wave2/codacy.json" + out_md_rel = "tmp/quality-wave2/codacy.md" + out_json = repo / out_json_rel + + monkeypatch.delenv("CODACY_API_TOKEN", raising=False) + monkeypatch.setattr( + module, + "_parse_args", + lambda: argparse.Namespace(repo="Prekzursil/Reframe", pull_request="", out_json=out_json_rel, 
out_md=out_md_rel), + ) + rc_missing = module.main() + _expect(rc_missing == 1, "Expected fail when CODACY_API_TOKEN is missing") + + monkeypatch.setenv("CODACY_API_TOKEN", "token") + + def fake_request(url: str, token: str, *, method: str = "GET", data=None): + _ = (url, token, method, data) + return {"pagination": {"total": 0}} + + monkeypatch.setattr(module, "_request_json", fake_request) + rc_repo = module.main() + _expect(rc_repo == 0, "Expected pass when repository open issues == 0") + _expect(out_json.is_file(), "Expected codacy JSON output") + + monkeypatch.setattr( + module, + "_parse_args", + lambda: argparse.Namespace(repo="Prekzursil/Reframe", pull_request="abc", out_json=out_json_rel, out_md=out_md_rel), + ) + rc_invalid_pr = module.main() + _expect(rc_invalid_pr == 1, "Expected fail for invalid pull request number") + + +def test_check_sonar_zero_main_wait_and_exception(monkeypatch): + module = _load_quality("check_sonar_zero") + + repo = _repo_root() + out_json_rel = "tmp/quality-wave2/sonar.json" + out_md_rel = "tmp/quality-wave2/sonar.md" + _ = (repo / out_json_rel, repo / out_md_rel) + + sequence = iter([(2, "ERROR"), (0, "OK")]) + + def fake_query(**_kwargs): + return next(sequence) + + monkeypatch.setattr(module, "_query_sonar_status", fake_query) + monkeypatch.setattr(module.time, "sleep", lambda _s: None) + monkeypatch.setenv("SONAR_TOKEN", "token") + monkeypatch.setattr( + module, + "_parse_args", + lambda: argparse.Namespace( + project_key="Prekzursil_Reframe", + token="", + branch="", + pull_request="107", + wait_seconds=15, + require_quality_gate=True, + ignore_open_issues=False, + out_json=out_json_rel, + out_md=out_md_rel, + ), + ) + + rc_wait = module.main() + _expect(rc_wait == 0, "Expected Sonar pass after wait loop resolves to zero") + + monkeypatch.setattr(module, "_query_sonar_status", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("boom"))) + rc_exc = module.main() + _expect(rc_exc == 1, "Expected Sonar fail on query 
exception") + + +def test_check_visual_zero_percy_and_applitools_paths(monkeypatch, tmp_path): + module = _load_quality("check_visual_zero") + + monkeypatch.setenv("PERCY_TOKEN", "token") + monkeypatch.setenv("GITHUB_SHA", "abc1234") + monkeypatch.setattr(module, "_percy_request", lambda _path, _token, query=None: {"data": []}) + clock = {"t": 0.0} + monkeypatch.setattr(module.time, "monotonic", lambda: clock.__setitem__("t", clock["t"] + 301.0) or clock["t"]) + monkeypatch.setattr(module.time, "sleep", lambda _s: None) + status, details, findings = module._run_percy(argparse.Namespace(percy_token="", sha="", branch="main")) + _expect(status == "pass", "Expected pass when Percy build is unavailable") + _expect(details.get("lookup_mode") == "unavailable", "Expected unavailable lookup mode") + _expect(findings, "Expected informational finding") + + monkeypatch.setattr( + module, + "_percy_request", + lambda _path, _token, query=None: { + "data": [ + { + "id": "1", + "attributes": { + "created-at": "2026-03-04T00:00:00Z", + "review-state": "unreviewed", + "total-comparisons-diff": 2, + }, + } + ] + }, + ) + monkeypatch.setattr(module.time, "monotonic", lambda: 0.0) + status_fail, _details_fail, findings_fail = module._run_percy(argparse.Namespace(percy_token="", sha="abc1234", branch="main")) + _expect(status_fail == "fail", "Expected fail for unresolved Percy diffs") + _expect(any("unresolved visual diffs" in item for item in findings_fail), "Expected unresolved diff finding") + + missing_status, _missing_details, _missing_findings = module._run_applitools( + argparse.Namespace(applitools_results="", provider="applitools") + ) + _expect(missing_status == "fail", "Expected fail when applitools results path is missing") + + results_path = _repo_root() / "tmp" / "quality-wave2" / "applitools.json" + results_path.parent.mkdir(parents=True, exist_ok=True) + results_path.write_text(json.dumps({"unresolved": 0, "mismatches": 0, "missing": 0}), encoding="utf-8") + ok_status, 
_ok_details, ok_findings = module._run_applitools( + argparse.Namespace(applitools_results="tmp/quality-wave2/applitools.json", provider="applitools") + ) + _expect(ok_status == "pass", "Expected pass when applitools metrics are zero") + _expect(ok_findings == [], "Expected no findings for zero applitools metrics") + + +def test_percy_auto_approve_main_paths(monkeypatch, capsys): + module = _load_quality("percy_auto_approve") + + monkeypatch.delenv("PERCY_TOKEN", raising=False) + rc_missing = module.main(["--sha", "abc1234"]) + _expect(rc_missing == 1, "Expected missing token failure") + + monkeypatch.setenv("PERCY_TOKEN", "token") + rc_bad_sha = module.main(["--sha", "not-sha"]) + _expect(rc_bad_sha == 1, "Expected invalid SHA failure") + + monkeypatch.setattr(module, "_query_builds", lambda **_kwargs: {"data": []}) + rc_no_build = module.main(["--sha", "abc1234", "--retry-attempts", "1"]) + _expect(rc_no_build == 0, "Expected no-build path to be informational success") + + requested = {"approved": False} + + def fake_request_json(**kwargs): + if kwargs.get("method") == "POST" and kwargs.get("path") == "/reviews": + requested["approved"] = True + return {"ok": True} + return {} + + monkeypatch.setattr( + module, + "_query_builds", + lambda **_kwargs: { + "data": [ + { + "id": "build-1", + "attributes": { + "created-at": "2026-03-04T00:00:00Z", + "state": "finished", + "review-state": "unreviewed", + }, + } + ] + }, + ) + monkeypatch.setattr(module, "_request_json", fake_request_json) + + rc_approve = module.main(["--sha", "abc1234", "--retry-attempts", "1"]) + _expect(rc_approve == 0, "Expected build approval path success") + _expect(requested["approved"], "Expected Percy review approval POST") + _expect("approved=true" in capsys.readouterr().out, "Expected approved output marker") + + +def test_upsert_ops_digest_main_error_paths(monkeypatch): + module = _load_script("upsert_ops_digest_issue") + repo = _repo_root() + + digest_json_rel = 
"tmp/quality-wave2/digest.json" + digest_md_rel = "tmp/quality-wave2/digest.md" + out_json_rel = "tmp/quality-wave2/digest-out.json" + + digest_json = repo / digest_json_rel + digest_md = repo / digest_md_rel + digest_json.parent.mkdir(parents=True, exist_ok=True) + digest_json.write_text(json.dumps({"metrics": {}, "trends": {}, "health": {}}), encoding="utf-8") + digest_md.write_text("# digest\n", encoding="utf-8") + + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + monkeypatch.delenv("GH_TOKEN", raising=False) + monkeypatch.setattr( + module, + "parse_args", + lambda: argparse.Namespace( + repo="Prekzursil/Reframe", + digest_json=digest_json_rel, + digest_md=digest_md_rel, + out_json=out_json_rel, + title="Weekly Ops Digest (rolling)", + ), + ) + with pytest.raises(SystemExit): + module.main() + + monkeypatch.setenv("GITHUB_TOKEN", "token") + monkeypatch.setattr( + module, + "parse_args", + lambda: argparse.Namespace( + repo="invalid-repo", + digest_json=digest_json_rel, + digest_md=digest_md_rel, + out_json=out_json_rel, + title="Weekly Ops Digest (rolling)", + ), + ) + with pytest.raises(SystemExit): + module.main() + + +def test_release_readiness_run_json_and_collect_status(monkeypatch, tmp_path): + module = _load_script("release_readiness_report") + + monkeypatch.setattr(module.subprocess, "run", lambda *args, **kwargs: (_ for _ in ()).throw(FileNotFoundError("missing"))) + _expect(module._run_json(["gh"], cwd=tmp_path) is None, "Expected None when subprocess binary is missing") + + monkeypatch.setattr(module, "_main_sha", lambda _repo: "abc") + monkeypatch.setattr(module, "_run_json", lambda _cmd, cwd: {"unexpected": True}) + status = module._collect_gh_status(tmp_path) + _expect(status["ci"] is None and status["codeql"] is None, "Expected null workflow snapshots for malformed runs payload") + _expect(isinstance(status["branch_protection"], dict), "Expected branch protection payload dictionary") + 
_expect(status["branch_protection"].get("required_reviews") is None, "Expected missing required_reviews for malformed payload") + + + + + diff --git a/apps/api/tests/test_scripts_quality_gates.py b/apps/api/tests/test_scripts_quality_gates.py index c6780a87..5b8085bc 100644 --- a/apps/api/tests/test_scripts_quality_gates.py +++ b/apps/api/tests/test_scripts_quality_gates.py @@ -48,7 +48,7 @@ def test_assert_coverage_100_parses_xml_and_lcov(tmp_path): _expect(xml_stats.percent == 100.0, "Expected XML coverage percent to be 100") _expect(lcov_stats.percent == 100.0, "Expected LCOV coverage percent to be 100") - status, findings = module.evaluate([xml_stats, lcov_stats]) + status, findings, _metrics = module.evaluate([xml_stats, lcov_stats], expected_inventory=None) _expect(status == "pass", "Expected pass when all components are at 100%") _expect(findings == [], "Expected no findings for full coverage") @@ -60,7 +60,7 @@ def test_assert_coverage_100_detects_below_target(tmp_path): lcov_path.write_text("TN:\nSF:file.ts\nLF:4\nLH:3\nend_of_record\n", encoding="utf-8") stats = module.parse_lcov("web", lcov_path) - status, findings = module.evaluate([stats]) + status, findings, _metrics = module.evaluate([stats], expected_inventory=None) _expect(status == "fail", "Expected fail when a component is below 100%") _expect(any("below 100%" in item for item in findings), "Expected below-100 finding") @@ -137,3 +137,49 @@ def test_sonar_evaluate_status_still_enforces_quality_gate(): ) _expect(any("quality gate" in item for item in findings), "Expected quality gate finding") + + +def test_assert_coverage_inventory_skips_empty_files(tmp_path, monkeypatch): + module = _load_module("assert_coverage_100") + + empty_init = tmp_path / "apps" / "api" / "app" / "__init__.py" + empty_init.parent.mkdir(parents=True, exist_ok=True) + empty_init.write_text("", encoding="utf-8") + + main_py = empty_init.parent / "main.py" + main_py.write_text("print('ok')\n", encoding="utf-8") + + 
monkeypatch.setattr( + module, + "_load_git_tracked_files", + lambda _root: ["apps/api/app/__init__.py", "apps/api/app/main.py"], + ) + + expected = module._collect_expected_inventory(tmp_path) + + _expect("apps/api/app/main.py" in expected, "Expected non-empty tracked source file in inventory") + _expect("apps/api/app/__init__.py" not in expected, "Expected empty tracked file to be skipped") + +def test_assert_coverage_inventory_skips_python_metadata_only_file(tmp_path, monkeypatch): + module = _load_module("assert_coverage_100") + + metadata_init = tmp_path / "packages" / "media-core" / "src" / "media_core" / "__init__.py" + metadata_init.parent.mkdir(parents=True, exist_ok=True) + metadata_init.write_text('"""pkg"""\n\n__all__ = []\n', encoding="utf-8") + + logic_file = metadata_init.parent / "core.py" + logic_file.write_text("VALUE = 1\n", encoding="utf-8") + + monkeypatch.setattr( + module, + "_load_git_tracked_files", + lambda _root: [ + "packages/media-core/src/media_core/__init__.py", + "packages/media-core/src/media_core/core.py", + ], + ) + + expected = module._collect_expected_inventory(tmp_path) + + _expect("packages/media-core/src/media_core/__init__.py" not in expected, "Expected metadata-only module file to be skipped") + _expect("packages/media-core/src/media_core/core.py" in expected, "Expected executable module file in inventory") diff --git a/apps/api/tests/test_scripts_quality_gates_extended.py b/apps/api/tests/test_scripts_quality_gates_extended.py new file mode 100644 index 00000000..4edb3995 --- /dev/null +++ b/apps/api/tests/test_scripts_quality_gates_extended.py @@ -0,0 +1,285 @@ +from __future__ import annotations + +import argparse +import sys +from importlib.util import module_from_spec, spec_from_file_location +from pathlib import Path + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _load_quality(name: str): + repo_root = Path(__file__).resolve().parents[3] + 
script_dir = repo_root / "scripts" / "quality" + if str(script_dir) not in sys.path: + sys.path.insert(0, str(script_dir)) + module_path = script_dir / f"{name}.py" + spec = spec_from_file_location(name, module_path) + _expect(spec is not None and spec.loader is not None, f"Unable to load module spec for {name}") + module = module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def test_check_codacy_zero_main_paths(monkeypatch): + module = _load_quality("check_codacy_zero") + + args = argparse.Namespace(repo="owner/repo", pull_request="", out_json="out/codacy.json", out_md="out/codacy.md") + monkeypatch.setattr(module, "_parse_args", lambda: args) + monkeypatch.delenv("CODACY_API_TOKEN", raising=False) + monkeypatch.setenv("GITHUB_REPOSITORY", "owner/repo") + + _expect(module.main() == 1, "Expected missing CODACY_API_TOKEN to fail") + + monkeypatch.setenv("CODACY_API_TOKEN", "token") + bad_args = argparse.Namespace(repo="bad slug", pull_request="", out_json="out/codacy.json", out_md="out/codacy.md") + monkeypatch.setattr(module, "_parse_args", lambda: bad_args) + _expect(module.main() == 1, "Expected invalid repo slug to fail") + + calls = {"count": 0} + + def fake_request(url: str, token: str, *, method: str = "GET", data=None): + _ = (url, token, method, data) + calls["count"] += 1 + if calls["count"] == 1: + return {"analyzed": False, "pagination": {"total": 0}} + return {"analyzed": True, "pagination": {"total": 0}} + + pr_args = argparse.Namespace(repo="owner/repo", pull_request="107", out_json="out/codacy.json", out_md="out/codacy.md") + monkeypatch.setattr(module, "_parse_args", lambda: pr_args) + monkeypatch.setattr(module, "_request_json", fake_request) + monkeypatch.setattr(module.time, "sleep", lambda _n: None) + + _expect(module.main() == 0, "Expected PR scope to pass when open issues are zero") + + +def test_check_deepscan_zero_main_paths(monkeypatch): + module = 
_load_quality("check_deepscan_zero") + + args = argparse.Namespace(out_json="out/deepscan.json", out_md="out/deepscan.md") + monkeypatch.setattr(module, "_parse_args", lambda: args) + monkeypatch.delenv("GITHUB_TOKEN", raising=False) + monkeypatch.delenv("GH_TOKEN", raising=False) + monkeypatch.delenv("GITHUB_REPOSITORY", raising=False) + monkeypatch.delenv("GITHUB_SHA", raising=False) + _expect(module.main() == 1, "Expected missing GitHub context to fail") + + monkeypatch.setenv("GITHUB_TOKEN", "token") + monkeypatch.setenv("GITHUB_REPOSITORY", "Prekzursil/Reframe") + monkeypatch.setenv("GITHUB_SHA", "abc123") + + payload = { + "check_runs": [ + { + "name": "DeepScan", + "conclusion": "success", + "details_url": "https://deepscan.io/analysis", + "output": {"summary": "0 new and 2 fixed issues"}, + "completed_at": "2026-03-04T00:00:00Z", + } + ] + } + monkeypatch.setattr(module, "_request_json", lambda _url, _token: payload) + + _expect(module.main() == 0, "Expected DeepScan zero-main path to pass") + + + + +def test_check_deepscan_zero_status_context_fallback(monkeypatch): + module = _load_quality("check_deepscan_zero") + + args = argparse.Namespace(out_json="out/deepscan-status.json", out_md="out/deepscan-status.md") + monkeypatch.setattr(module, "_parse_args", lambda: args) + monkeypatch.setenv("GITHUB_TOKEN", "token") + monkeypatch.setenv("GITHUB_REPOSITORY", "Prekzursil/Reframe") + monkeypatch.setenv("GITHUB_SHA", "abc123") + + def fake_request(url: str, _token: str): + if "check-runs" in url: + return {"check_runs": []} + return { + "statuses": [ + { + "context": "DeepScan", + "state": "success", + "description": "0 new and 1 fixed issues", + "target_url": "https://deepscan.io/dashboard", + "updated_at": "2026-03-04T01:00:00Z", + } + ] + } + + monkeypatch.setattr(module, "_request_json", fake_request) + + _expect(module.main() == 0, "Expected status-context fallback to pass when new issues are zero") + + +def test_check_sentry_zero_main_paths(monkeypatch): + 
module = _load_quality("check_sentry_zero") + args = argparse.Namespace(out_json="out/sentry.json", out_md="out/sentry.md") + monkeypatch.setattr(module, "_parse_args", lambda: args) + + monkeypatch.delenv("SENTRY_AUTH_TOKEN", raising=False) + monkeypatch.setenv("SENTRY_ORG", "andrei-visalon") + monkeypatch.setenv("SENTRY_PROJECT_BACKEND", "reframe-backend") + monkeypatch.setenv("SENTRY_PROJECT_WEB", "reframe-web") + _expect(module.main() == 1, "Expected missing token to fail") + + monkeypatch.setenv("SENTRY_AUTH_TOKEN", "token") + + def fake_request(_url: str, _token: str): + return [], {"x-hits": "0"} + + monkeypatch.setattr(module, "_request", fake_request) + _expect(module.main() == 0, "Expected sentry zero check to pass when unresolved=0") + + +def test_check_sonar_zero_main_paths(monkeypatch): + module = _load_quality("check_sonar_zero") + + args = argparse.Namespace( + project_key="Prekzursil_Reframe", + token="", + branch="", + pull_request="107", + wait_seconds=0, + require_quality_gate=True, + ignore_open_issues=False, + out_json="out/sonar.json", + out_md="out/sonar.md", + ) + monkeypatch.setattr(module, "_parse_args", lambda: args) + monkeypatch.delenv("SONAR_TOKEN", raising=False) + _expect(module.main() == 1, "Expected missing SONAR_TOKEN to fail") + + monkeypatch.setenv("SONAR_TOKEN", "token") + monkeypatch.setattr(module, "_query_sonar_status", lambda **_kwargs: (0, "OK")) + _expect(module.main() == 0, "Expected sonar zero to pass with open issues 0 and gate OK") + + +def test_check_required_checks_main_paths(monkeypatch): + module = _load_quality("check_required_checks") + args = argparse.Namespace( + repo="Prekzursil/Reframe", + sha="1234", + required_context=["CI", "Coverage 100 Gate"], + timeout_seconds=1, + poll_seconds=1, + out_json="out/required.json", + out_md="out/required.md", + ) + monkeypatch.setattr(module, "_parse_args", lambda: args) + + monkeypatch.setenv("GITHUB_TOKEN", "token") + + def fake_api_get(repo: str, path: str, token: 
str): + _ = (repo, token) + if "check-runs" in path: + return { + "check_runs": [ + {"name": "CI", "status": "completed", "conclusion": "success"}, + {"name": "Coverage 100 Gate", "status": "completed", "conclusion": "success"}, + ] + } + return {"statuses": []} + + monkeypatch.setattr(module, "_api_get", fake_api_get) + + _expect(module.main() == 0, "Expected required-checks gate to pass with all contexts successful") + + +def test_check_visual_zero_percy_and_applitools(monkeypatch, tmp_path): + module = _load_quality("check_visual_zero") + + percy_args = argparse.Namespace( + provider="percy", + sha="abc1234", + branch="feat", + percy_token="token", + applitools_results="", + out_json="tmp/percy.json", + out_md="tmp/percy.md", + ) + monkeypatch.setattr(module, "_parse_args", lambda: percy_args) + + payload = { + "data": [ + { + "id": "build-1", + "attributes": { + "created-at": "2026-03-04T00:00:00Z", + "review-state": "approved", + "total-comparisons-diff": 0, + }, + } + ] + } + monkeypatch.setattr(module, "_percy_request", lambda *_args, **_kwargs: payload) + monkeypatch.setattr(module.time, "sleep", lambda _n: None) + + _expect(module.main() == 0, "Expected Percy visual check to pass") + + applitools_json = Path("tmp/applitools-input.json") + applitools_json.parent.mkdir(parents=True, exist_ok=True) + applitools_json.write_text('{"unresolved":0,"mismatches":0,"missing":0}', encoding="utf-8") + + applitools_args = argparse.Namespace( + provider="applitools", + sha="", + branch="", + percy_token="", + applitools_results=str(applitools_json), + out_json="tmp/applitools-out.json", + out_md="tmp/applitools-out.md", + ) + monkeypatch.setattr(module, "_parse_args", lambda: applitools_args) + _expect(module.main() == 0, "Expected Applitools visual check to pass") + + +def test_percy_auto_approve_paths(monkeypatch): + module = _load_quality("percy_auto_approve") + + monkeypatch.delenv("PERCY_TOKEN", raising=False) + _expect(module.main(["--sha", "abcdef1"]) == 1, 
"Expected missing token path to fail") + + monkeypatch.setenv("PERCY_TOKEN", "token") + _expect(module.main(["--sha", "bad-sha"]) == 1, "Expected invalid SHA to fail") + + monkeypatch.setattr(module, "_query_builds", lambda **_kwargs: {"data": []}) + monkeypatch.setattr(module.time, "sleep", lambda _n: None) + _expect( + module.main(["--sha", "abcdef1", "--retry-attempts", "1", "--retry-delay-seconds", "1"]) == 0, + "Expected no-unreviewed-build path to be informational pass", + ) + + posted = {"called": False} + + def fake_query(**_kwargs): + return { + "data": [ + { + "id": "b1", + "attributes": {"state": "finished", "review-state": "unreviewed", "created-at": "2026-03-04"}, + } + ] + } + + def fake_request_json(*, token, method, path, query=None, payload=None, basic_auth=None): + _ = (token, query, basic_auth) + if method == "POST": + posted["called"] = True + _expect(path == "/reviews", "Expected reviews endpoint for approval") + _expect(payload is not None, "Expected review payload") + return {"data": []} + + monkeypatch.setattr(module, "_query_builds", fake_query) + monkeypatch.setattr(module, "_request_json", fake_request_json) + + rc = module.main(["--sha", "abcdef1", "--retry-attempts", "1", "--retry-delay-seconds", "1"]) + _expect(rc == 0, "Expected successful Percy auto-approval") + _expect(posted["called"], "Expected approval POST to be executed") diff --git a/apps/desktop/index.html b/apps/desktop/index.html index 052a6280..5dc50173 100644 --- a/apps/desktop/index.html +++ b/apps/desktop/index.html @@ -10,34 +10,56 @@
-
-

Reframe Desktop

+
+

Reframe Studio Desktop

- Runs the Reframe stack via Docker Compose and opens the local UI. + Create captions, shorts, subtitle styles, and publish workflows locally. + This desktop build runs a bundled local runtime by default with no Docker requirement.

-

Stack

-
- - - +

First-Run Guide

+
+
+
1. Prepare Runtime
+
Validate local Python dependencies and queue worker prerequisites.
+ pending +
+
+
2. Start Local Studio
+
Boot local API + queue mode and wait for health checks.
+ pending +
+
+
3. Create in Studio
+
Open the full product UI and start a media workflow.
+ pending +
+
+ + + - - - - -
+
+ Advanced runtime controls +
+ + + + + + +
+
+
-
Compose file
+
Runtime root
detecting…
@@ -49,7 +71,7 @@

Stack

checking…
-
Docker
+
Runtime engine
checking…
@@ -59,13 +81,11 @@

Stack

Diagnostics

-

- Reads live status from the local API (when the stack is running). -

+

Live diagnostics from the local Studio runtime.

-
UI URL
- http://localhost:5173 +
Studio URL
+ http://localhost:8000
API URL
@@ -96,13 +116,13 @@

Diagnostics

-

Output

+

Session Log

Ready.
- Tip: if you don’t have a `.env` yet, copy `.env.example` in the repo - root and set `REFRAME_OFFLINE_MODE=true` for offline-only behavior. + Local-first desktop mode runs with no Docker requirement. For strict offline operation, keep + REFRAME_OFFLINE_MODE=true in your environment.
diff --git a/apps/desktop/package.json b/apps/desktop/package.json index f0b630cb..dc1e83ca 100644 --- a/apps/desktop/package.json +++ b/apps/desktop/package.json @@ -3,14 +3,7 @@ "private": true, "version": "0.1.8", "type": "module", - "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "test": "vitest run", - "test:coverage": "vitest run --coverage", - "preview": "vite preview", - "tauri": "tauri" - }, + "scripts": {"build": "npm run prepare:runtime && tsc && vite build", "dev": "npm run prepare:runtime && vite", "prepare:runtime": "node ./scripts/prepare-runtime.mjs", "preview": "vite preview", "tauri": "tauri", "test": "vitest run", "test:coverage": "vitest run --coverage"}, "dependencies": { "@tauri-apps/api": "^2.10.1", "@tauri-apps/plugin-opener": "^2.4.0", diff --git a/apps/desktop/scripts/prepare-runtime.mjs b/apps/desktop/scripts/prepare-runtime.mjs new file mode 100644 index 00000000..201fa5b1 --- /dev/null +++ b/apps/desktop/scripts/prepare-runtime.mjs @@ -0,0 +1,183 @@ +import { copyFileSync, existsSync, mkdirSync, readdirSync, rmSync, writeFileSync } from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const scriptDir = path.dirname(fileURLToPath(import.meta.url)); +const repoRoot = path.resolve(scriptDir, "../../.."); +const runtimeRoot = path.resolve(scriptDir, "../src-tauri/runtime"); +const webDist = path.resolve(repoRoot, "apps/web/dist"); + +function normalize(p) { + return p.replace(/\\/g, "/"); +} + +function assertInside(base, candidate, label) { + const rel = path.relative(base, candidate); + if (rel.startsWith("..") || path.isAbsolute(rel)) { + throw new Error(`${label} path escapes root: ${candidate}`); + } +} + +function resolveInside(base, relPath, label) { + const safeRel = normalize(String(relPath || "")); + if (safeRel.includes("..")) { + throw new Error(`${label} path traversal detected: ${safeRel}`); + } + const resolved = path.resolve(base, safeRel); + assertInside(base, resolved, 
label); + return resolved; +} + +function resolveRepo(...segments) { + const resolved = path.resolve(repoRoot, ...segments); + assertInside(repoRoot, resolved, "repo"); + return resolved; +} + +function resolveRuntime(...segments) { + const resolved = path.resolve(runtimeRoot, ...segments); + assertInside(runtimeRoot, resolved, "runtime"); + return resolved; +} + +function ensureDir(resolvedPath) { + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- validated by resolveInside/assertInside + mkdirSync(resolvedPath, { recursive: true }); +} + +function clearRuntimeDir() { + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- runtimeRoot is fixed and trusted + rmSync(runtimeRoot, { recursive: true, force: true }); + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- runtimeRoot is fixed and trusted + mkdirSync(runtimeRoot, { recursive: true }); +} + +function copyFile(srcPath, dstPath) { + ensureDir(path.dirname(dstPath)); + copyFileSync(srcPath, dstPath); +} + +function shouldSkip(relPath) { + const normalized = normalize(relPath); + if (normalized.includes("/__pycache__/")) { + return true; + } + if (normalized.endsWith(".pyc")) { + return true; + } + if (/\/test_.*\.py$/i.test(normalized)) { + return true; + } + if (normalized.endsWith("/README.md")) { + return true; + } + return false; +} + +function copyTree(srcRoot, dstRoot) { + const stack = [""]; + while (stack.length > 0) { + const rel = stack.pop(); + const srcDir = resolveInside(srcRoot, rel, "copy-tree-src"); + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- srcDir validated by resolveInside + const entries = readdirSync(srcDir, { withFileTypes: true }); + + for (const entry of entries) { + const nextRel = rel ? 
`${rel}/${entry.name}` : entry.name; + if (shouldSkip(nextRel)) { + continue; + } + + const srcPath = resolveInside(srcRoot, nextRel, "copy-tree-src"); + const dstPath = resolveInside(dstRoot, nextRel, "copy-tree-dst"); + if (entry.isDirectory()) { + ensureDir(dstPath); + stack.push(nextRel); + } else if (entry.isFile()) { + copyFile(srcPath, dstPath); + } + } + } +} + +function requirePath(label, targetPath) { + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- targetPath pre-resolved from trusted roots + if (!existsSync(targetPath)) { + throw new Error(`${label} missing: ${targetPath}`); + } +} + +function writeManifest(files) { + const manifest = { + generated_utc: new Date().toISOString(), + runtime_root: normalize(path.relative(repoRoot, runtimeRoot)), + files, + }; + const outPath = resolveRuntime("manifest.json"); + // nosemgrep: javascript.pathtraversal.rule-non-literal-fs-filename -- outPath resolved inside runtime root + writeFileSync(outPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); +} + +function main() { + const apiRequirements = resolveRepo("apps", "api", "requirements.txt"); + const workerRequirements = resolveRepo("services", "worker", "requirements.txt"); + const mediaCorePackage = resolveRepo("packages", "media-core", "src", "media_core"); + const webDistIndex = path.resolve(webDist, "index.html"); + + requirePath("API requirements", apiRequirements); + requirePath("Worker requirements", workerRequirements); + requirePath("Media core package", mediaCorePackage); + requirePath("Web dist", webDist); + requirePath("Web dist index", webDistIndex); + + clearRuntimeDir(); + + const copies = [ + { + src: resolveRepo("apps", "api", "app"), + dst: resolveRuntime("apps", "api", "app"), + tree: true, + }, + { + src: apiRequirements, + dst: resolveRuntime("apps", "api", "requirements.txt"), + tree: false, + }, + { + src: resolveRepo("services", "worker"), + dst: resolveRuntime("services", "worker"), + tree: true, + }, + { + src: 
mediaCorePackage, + dst: resolveRuntime("packages", "media-core", "src", "media_core"), + tree: true, + }, + { + src: webDist, + dst: resolveRuntime("apps", "web", "dist"), + tree: true, + }, + ]; + + const copied = []; + for (const item of copies) { + requirePath("Source", item.src); + if (item.tree) { + copyTree(item.src, item.dst); + } else { + copyFile(item.src, item.dst); + } + copied.push({ + src: normalize(path.relative(repoRoot, item.src)), + dst: normalize(path.relative(repoRoot, item.dst)), + mode: item.tree ? "tree" : "file", + }); + } + + writeManifest(copied); + console.log(`Prepared desktop runtime resources at ${runtimeRoot}`); +} + +main(); + diff --git a/apps/desktop/src-tauri/.gitignore b/apps/desktop/src-tauri/.gitignore index 5373dd71..c48b2879 100644 --- a/apps/desktop/src-tauri/.gitignore +++ b/apps/desktop/src-tauri/.gitignore @@ -8,3 +8,7 @@ # Updater signing key (private) /keys/*.key + +# Generated desktop runtime bundle +/runtime/ +/Cargo.lock diff --git a/apps/desktop/src-tauri/src/lib.rs b/apps/desktop/src-tauri/src/lib.rs index 3d1a50b3..2f6bb32c 100644 --- a/apps/desktop/src-tauri/src/lib.rs +++ b/apps/desktop/src-tauri/src/lib.rs @@ -1,18 +1,54 @@ -use std::path::PathBuf; -use std::process::Command; +use std::env; +use std::ffi::OsString; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::{Child, Command}; +use std::sync::{Mutex, MutexGuard, OnceLock}; -fn find_compose_file() -> Result { - let mut current = std::env::current_dir().map_err(|e| format!("Unable to read current dir: {e}"))?; +use tauri::Manager; + +fn has_runtime_layout(root: &Path) -> bool { + root.join("apps") + .join("api") + .join("app") + .join("main.py") + .is_file() + && root + .join("packages") + .join("media-core") + .join("src") + .join("media_core") + .is_dir() +} + +fn runtime_root_from_env() -> Option { + let raw = env::var("REFRAME_DESKTOP_RUNTIME_ROOT").ok()?; + let candidate = PathBuf::from(raw); + if has_runtime_layout(&candidate) { + 
Some(candidate) + } else { + None + } +} + +fn find_repo_root() -> Result { + let mut current = env::current_dir().map_err(|e| format!("Unable to read current dir: {e}"))?; loop { - let candidate = current.join("infra").join("docker-compose.yml"); - if candidate.is_file() { - return Ok(candidate); + if has_runtime_layout(&current) { + return Ok(current); } if !current.pop() { break; } } - Err("Could not locate infra/docker-compose.yml; run the desktop app from inside the repo checkout.".to_string()) + Err("Could not locate runtime root with apps/api/app/main.py; run desktop app from a repository checkout or package runtime resources.".to_string()) +} + +fn find_runtime_root() -> Result { + if let Some(root) = runtime_root_from_env() { + return Ok(root); + } + find_repo_root() } fn format_output(stdout: &[u8], stderr: &[u8]) -> String { @@ -30,90 +66,401 @@ } fn run_checked(mut cmd: Command) -> Result { - let output = cmd.output().map_err(|e| format!("Command failed to start: {e}"))?; + let output = cmd + .output() + .map_err(|e| format!("Command failed to start: {e}"))?; let rendered = format_output(&output.stdout, &output.stderr); if output.status.success() { return Ok(rendered); } - let code = output.status.code().map(|c| c.to_string()).unwrap_or_else(|| "unknown".to_string()); + let code = output + .status + .code() + .map(|c| c.to_string()) + .unwrap_or_else(|| "unknown".to_string()); Err(format!("Command failed (exit {code})\n{rendered}")) } -fn docker_compose_unsupported(rendered: &str) -> bool { - let s = rendered.to_lowercase(); - s.contains("is not a docker command") - || s.contains("unknown command") - || s.contains("unknown shorthand flag") - || s.contains("unknown flag: --no-build") -} - -fn run_compose(args: &[&str]) -> Result { - let compose_path = find_compose_file()?; - let compose_dir = compose_path - .parent() - .ok_or_else(|| "Invalid compose file path".to_string())?; - - // Prefer `docker 
compose`, but fall back to the legacy `docker-compose` binary when necessary. - let docker_result = run_checked({ - let mut cmd = Command::new("docker"); - cmd.current_dir(compose_dir) - .arg("compose") - .arg("-f") - .arg(&compose_path) - .args(args); - cmd - }); - - match docker_result { - Ok(out) => Ok(out), - Err(err) => { - // If docker isn't installed, `run_checked` would have failed to start; in that case - // try `docker-compose` before returning the error. - let is_not_found = err.to_lowercase().contains("failed to start"); - if is_not_found || docker_compose_unsupported(&err) { - run_checked({ - let mut cmd = Command::new("docker-compose"); - cmd.current_dir(compose_dir).arg("-f").arg(&compose_path).args(args); - cmd - }) - } else { - Err(err) +fn candidate_python_binaries(runtime_root: &Path) -> Vec { + let mut candidates = Vec::new(); + if let Ok(explicit) = env::var("REFRAME_DESKTOP_PYTHON") { + let trimmed = explicit.trim(); + if !trimmed.is_empty() { + candidates.push(PathBuf::from(trimmed)); + } + } + + candidates.push( + runtime_root + .join(".venv") + .join("Scripts") + .join("python.exe"), + ); + candidates.push(runtime_root.join(".venv").join("bin").join("python")); + candidates.push(PathBuf::from("python")); + candidates.push(PathBuf::from("python3")); + + candidates +} + +fn resolve_host_python_binary(runtime_root: &Path) -> Result { + for candidate in candidate_python_binaries(runtime_root) { + if candidate.is_absolute() { + if candidate.is_file() { + return Ok(candidate); + } + continue; + } + + let mut cmd = Command::new(&candidate); + cmd.arg("--version"); + if cmd.output().is_ok() { + return Ok(candidate); + } + } + + Err( + "No usable Python runtime found. Install Python 3.11+ or set REFRAME_DESKTOP_PYTHON." 
+ .to_string(), + ) +} + +fn pythonpath_for_runtime(runtime_root: &Path) -> Result { + let paths = vec![ + runtime_root.to_path_buf(), + runtime_root.join("apps").join("api"), + runtime_root.join("packages").join("media-core").join("src"), + ]; + env::join_paths(paths).map_err(|e| format!("Unable to assemble PYTHONPATH: {e}")) +} + +fn desktop_data_dir(runtime_root: &Path) -> Result { + if let Ok(raw) = env::var("REFRAME_DESKTOP_APP_DATA") { + let value = raw.trim(); + if !value.is_empty() { + let path = PathBuf::from(value); + fs::create_dir_all(&path) + .map_err(|e| format!("Unable to create desktop data dir {path:?}: {e}"))?; + return Ok(path); + } + } + + let fallback = runtime_root.join(".desktop-runtime"); + fs::create_dir_all(&fallback) + .map_err(|e| format!("Unable to create desktop data dir {fallback:?}: {e}"))?; + Ok(fallback) +} + +fn venv_dir(runtime_root: &Path) -> Result { + Ok(desktop_data_dir(runtime_root)?.join("venv")) +} + +fn venv_python(venv_dir: &Path) -> PathBuf { + if cfg!(target_os = "windows") { + venv_dir.join("Scripts").join("python.exe") + } else { + venv_dir.join("bin").join("python") + } +} + +fn runtime_requirement_files(runtime_root: &Path) -> Result<(PathBuf, PathBuf), String> { + let req_api = runtime_root + .join("apps") + .join("api") + .join("requirements.txt"); + let req_worker = runtime_root + .join("services") + .join("worker") + .join("requirements.txt"); + if !req_api.is_file() { + return Err(format!( + "Missing runtime requirement file: {}", + req_api.display() + )); + } + if !req_worker.is_file() { + return Err(format!( + "Missing runtime requirement file: {}", + req_worker.display() + )); + } + Ok((req_api, req_worker)) +} + +fn create_runtime_venv_if_missing( + host_python: &Path, + venv: &Path, + python: &Path, +) -> Result<(), String> { + if python.is_file() { + return Ok(()); + } + let mut create_cmd = Command::new(host_python); + create_cmd.arg("-m").arg("venv").arg(venv); + run_checked(create_cmd)?; + Ok(()) +} + 
+fn install_runtime_requirements( + python: &Path, + req_api: &Path, + req_worker: &Path, +) -> Result<(), String> { + let mut pip_upgrade = Command::new(python); + pip_upgrade + .arg("-m") + .arg("pip") + .arg("install") + .arg("--upgrade") + .arg("pip"); + run_checked(pip_upgrade)?; + + let mut install = Command::new(python); + install + .arg("-m") + .arg("pip") + .arg("install") + .arg("-r") + .arg(req_api) + .arg("-r") + .arg(req_worker) + .env("PIP_DISABLE_PIP_VERSION_CHECK", "1"); + run_checked(install)?; + Ok(()) +} + +fn mark_runtime_ready(marker: &Path) -> Result<(), String> { + fs::write(marker, "ready\n").map_err(|e| { + format!( + "Unable to write runtime readiness marker {}: {e}", + marker.display() + ) + }) +} + +fn runtime_venv_ready(python: &Path, marker: &Path) -> bool { + python.is_file() && marker.is_file() +} + +fn bootstrap_runtime_venv(runtime_root: &Path, python: &Path, marker: &Path) -> Result<(), String> { + let venv = venv_dir(runtime_root)?; + let host_python = resolve_host_python_binary(runtime_root)?; + create_runtime_venv_if_missing(&host_python, &venv, python)?; + + let (req_api, req_worker) = runtime_requirement_files(runtime_root)?; + install_runtime_requirements(python, &req_api, &req_worker)?; + mark_runtime_ready(marker) +} + +fn ensure_runtime_venv(runtime_root: &Path) -> Result { + let venv = venv_dir(runtime_root)?; + let python = venv_python(&venv); + let marker = venv.join(".reframe_runtime_ready"); + + if runtime_venv_ready(&python, &marker) { + return Ok(python); + } + + bootstrap_runtime_venv(runtime_root, &python, &marker)?; + Ok(python) +} + +fn desktop_web_dist(runtime_root: &Path) -> Option { + let candidate = runtime_root.join("apps").join("web").join("dist"); + if candidate.join("index.html").is_file() { + Some(candidate) + } else { + None + } +} + +#[derive(Default)] +struct RuntimeState { + api: Option, +} + +static RUNTIME_STATE: OnceLock> = OnceLock::new(); + +fn api_is_running(state: &mut RuntimeState) -> 
Result { + if let Some(child) = state.api.as_mut() { + match child + .try_wait() + .map_err(|e| format!("Failed to inspect API process: {e}"))? + { + Some(_) => { + state.api = None; + Ok(false) } + None => Ok(true), } + } else { + Ok(false) } } +fn prepare_local_runtime() -> Result { + let runtime_root = find_runtime_root()?; + let python = ensure_runtime_venv(&runtime_root)?; + + let mut verify = Command::new(&python); + verify.arg("-c").arg("import fastapi,uvicorn"); + run_checked(verify)?; + + Ok(format!( + "local runtime dependencies ready\nroot: {}\npython: {}", + runtime_root.display(), + python.display() + )) +} + +fn running_runtime_pid(guard: &mut RuntimeState) -> Result, String> { + if api_is_running(guard)? { + let pid = guard.api.as_ref().map(|c| c.id()).unwrap_or_default(); + return Ok(Some(pid)); + } + Ok(None) +} + +fn ensure_media_root(runtime_root: &Path) -> Result { + let app_data = desktop_data_dir(runtime_root)?; + let media_root = app_data.join("media"); + fs::create_dir_all(&media_root).map_err(|e| { + format!( + "Unable to create desktop media root {}: {e}", + media_root.display() + ) + })?; + Ok(media_root) +} + +fn build_runtime_command( + runtime_root: &Path, + python: &Path, + pythonpath: OsString, + media_root: &Path, +) -> Command { + let mut cmd = Command::new(python); + cmd.current_dir(runtime_root) + .arg("-m") + .arg("uvicorn") + .arg("--factory") + .arg("app.main:create_app") + .arg("--host") + .arg("127.0.0.1") + .arg("--port") + .arg("8000") + .env("PYTHONPATH", pythonpath) + .env("REFRAME_LOCAL_QUEUE_MODE", "true") + .env("BROKER_URL", "memory://") + .env("RESULT_BACKEND", "cache+memory://") + .env("REFRAME_API_BASE_URL", "http://localhost:8000") + .env("REFRAME_APP_BASE_URL", "http://localhost:8000") + .env("REFRAME_MEDIA_ROOT", media_root); + + if let Some(web_dist) = desktop_web_dist(runtime_root) { + cmd.env("REFRAME_DESKTOP_WEB_DIST", web_dist); + } + cmd +} +fn runtime_state_guard() -> Result, String> { + RUNTIME_STATE + 
.get_or_init(|| Mutex::new(RuntimeState::default())) + .lock() + .map_err(|_| "Runtime state lock poisoned".to_string()) +} + +fn spawn_local_runtime( + runtime_root: &Path, + python: &Path, + pythonpath: OsString, +) -> Result { + let media_root = ensure_media_root(runtime_root)?; + let mut cmd = build_runtime_command(runtime_root, python, pythonpath, &media_root); + cmd.spawn() + .map_err(|e| format!("Failed to start local runtime API process: {e}")) +} + +fn start_local_runtime() -> Result { + let runtime_root = find_runtime_root()?; + let python = ensure_runtime_venv(&runtime_root)?; + let pythonpath = pythonpath_for_runtime(&runtime_root)?; + + let mut guard = runtime_state_guard()?; + if let Some(pid) = running_runtime_pid(&mut guard)? { + return Ok(format!("local runtime already running (api pid {pid})")); + } + + let child = spawn_local_runtime(&runtime_root, &python, pythonpath)?; + let pid = child.id(); + guard.api = Some(child); + Ok(format!("local runtime started (api pid {pid})")) +} + +fn stop_local_runtime() -> Result { + let mut guard = runtime_state_guard()?; + if let Some(mut child) = guard.api.take() { + let pid = child.id(); + if let Some(status) = child + .try_wait() + .map_err(|e| format!("Failed to inspect local runtime API process {pid}: {e}"))? + { + return Ok(format!( + "local runtime already stopped (api pid {pid}, status {status})" + )); + } + child + .kill() + .map_err(|e| format!("Failed to stop local runtime API process {pid}: {e}"))?; + let _ = child.wait(); + return Ok(format!("local runtime stopped (api pid {pid})")); + } + Ok("local runtime is not running".to_string()) +} +fn local_runtime_status() -> Result { + let mut guard = runtime_state_guard()?; + if api_is_running(&mut guard)? 
{ + let pid = guard.api.as_ref().map(|c| c.id()).unwrap_or_default(); + return Ok(format!("api running (pid {pid})\nqueue mode: local")); + } + Ok("api stopped\nqueue mode: local".to_string()) +} + +#[tauri::command] +fn runtime_prepare() -> Result { + prepare_local_runtime() +} + #[tauri::command] fn docker_version() -> Result { - let mut cmd = Command::new("docker"); + let runtime_root = find_runtime_root()?; + let python = ensure_runtime_venv(&runtime_root)?; + let mut cmd = Command::new(python); cmd.arg("--version"); - run_checked(cmd) + let version = run_checked(cmd)?; + Ok(format!( + "{version}\nmode: local runtime (no docker required)" + )) } #[tauri::command] fn compose_file_path() -> Result { - Ok(find_compose_file()?.display().to_string()) + Ok(find_runtime_root()?.display().to_string()) } #[tauri::command] fn compose_ps() -> Result { - run_compose(&["ps"]) + local_runtime_status() } #[tauri::command] fn compose_up(build: Option) -> Result { - let mut args = vec!["up", "-d", "--remove-orphans"]; - if build.unwrap_or(true) { - args.push("--build"); - } else { - args.push("--no-build"); - } - run_compose(&args) + let _ = build; + start_local_runtime() } #[tauri::command] fn compose_down() -> Result { - run_compose(&["down"]) + stop_local_runtime() } #[cfg_attr(mobile, tauri::mobile_entry_point)] @@ -125,9 +472,22 @@ pub fn run() { #[cfg(desktop)] app.handle() .plugin(tauri_plugin_updater::Builder::new().build())?; + + if let Ok(resource_dir) = app.path().resource_dir() { + let runtime_root = resource_dir.join("runtime"); + if has_runtime_layout(&runtime_root) { + env::set_var("REFRAME_DESKTOP_RUNTIME_ROOT", runtime_root); + } + } + + if let Ok(data_dir) = app.path().app_data_dir() { + let _ = fs::create_dir_all(&data_dir); + env::set_var("REFRAME_DESKTOP_APP_DATA", data_dir); + } Ok(()) }) .invoke_handler(tauri::generate_handler![ + runtime_prepare, docker_version, compose_file_path, compose_ps, @@ -137,3 +497,6 @@ pub fn run() { 
.run(tauri::generate_context!()) .expect("error while running tauri application"); } + +#[cfg(test)] +mod lib_tests; diff --git a/apps/desktop/src-tauri/src/lib_tests.rs b/apps/desktop/src-tauri/src/lib_tests.rs new file mode 100644 index 00000000..92aa9b57 --- /dev/null +++ b/apps/desktop/src-tauri/src/lib_tests.rs @@ -0,0 +1,472 @@ +use super::*; +use std::env; +use std::fs; +use std::path::Path; +use std::sync::{Mutex, MutexGuard, OnceLock}; +use std::time::{SystemTime, UNIX_EPOCH}; + +fn env_lock() -> MutexGuard<'static, ()> { + static ENV_LOCK: OnceLock> = OnceLock::new(); + match ENV_LOCK.get_or_init(|| Mutex::new(())).lock() { + Ok(guard) => guard, + Err(poisoned) => poisoned.into_inner(), + } +} + +fn unique_temp_dir(prefix: &str) -> PathBuf { + let mut dir = if cfg!(target_os = "windows") { + env::var_os("TEMP") + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from("C:/reframe-test-tmp")) + } else { + env::var_os("TMPDIR") + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from("/tmp/reframe-test-tmp")) + }; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time went backwards") + .as_nanos(); + dir.push("reframe-desktop-tests"); + dir.push(format!("{prefix}-{now}")); + fs::create_dir_all(&dir).expect("failed to create temp dir"); + dir +} + +fn write_file(path: &Path, content: &str) { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).expect("failed to create parent dir"); + } + fs::write(path, content).expect("failed to write file"); +} + +#[test] +fn has_runtime_layout_checks_expected_tree() { + let root = unique_temp_dir("reframe-runtime-layout"); + assert!(!has_runtime_layout(&root)); + + write_file(&root.join("apps").join("api").join("app").join("main.py"), "pass\n"); + fs::create_dir_all( + root.join("packages") + .join("media-core") + .join("src") + .join("media_core"), + ) + .expect("failed to create media_core dir"); + + assert!(has_runtime_layout(&root)); + let _ = fs::remove_dir_all(root); +} + 
+#[test] +fn format_output_combines_stdout_and_stderr() { + let rendered = format_output(b"hello", b"warn"); + assert_eq!(rendered, "hello\nwarn"); + assert_eq!(format_output(b"", b""), ""); +} + +#[test] +fn candidate_python_binaries_honors_explicit_env() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-python-candidates"); + env::set_var("REFRAME_DESKTOP_PYTHON", "custom-python"); + let candidates = candidate_python_binaries(&root); + assert_eq!(candidates.first().and_then(|p| p.to_str()), Some("custom-python")); + env::remove_var("REFRAME_DESKTOP_PYTHON"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn pythonpath_for_runtime_contains_api_and_media_core_paths() { + let root = unique_temp_dir("reframe-pythonpath"); + let joined = pythonpath_for_runtime(&root).expect("pythonpath assembly failed"); + let paths: Vec = env::split_paths(&joined).collect(); + + assert!(paths.contains(&root)); + assert!(paths.contains(&root.join("apps").join("api"))); + assert!(paths.contains(&root.join("packages").join("media-core").join("src"))); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn desktop_web_dist_detects_index_file() { + let root = unique_temp_dir("reframe-web-dist"); + assert!(desktop_web_dist(&root).is_none()); + + let dist = root.join("apps").join("web").join("dist"); + write_file(&dist.join("index.html"), ""); + assert_eq!(desktop_web_dist(&root), Some(dist)); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn runtime_venv_helpers_resolve_expected_paths() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-venv"); + env::set_var("REFRAME_DESKTOP_APP_DATA", root.join("data")); + + let data = desktop_data_dir(&root).expect("desktop data dir"); + assert!(data.is_dir()); + + let venv = venv_dir(&root).expect("venv dir"); + let python = venv_python(&venv); + let marker = venv.join(".reframe_runtime_ready"); + assert!(!runtime_venv_ready(&python, &marker)); + + write_file(&python, ""); + write_file(&marker, 
"ready\n"); + assert!(runtime_venv_ready(&python, &marker)); + + env::remove_var("REFRAME_DESKTOP_APP_DATA"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn build_runtime_command_sets_local_queue_defaults() { + let root = unique_temp_dir("reframe-runtime-cmd"); + let python = PathBuf::from("python"); + let py_path = pythonpath_for_runtime(&root).expect("pythonpath"); + let media_root = root.join("media"); + fs::create_dir_all(&media_root).expect("media root create"); + + let cmd = build_runtime_command(&root, &python, py_path, &media_root); + let args: Vec = cmd + .get_args() + .map(|arg| arg.to_string_lossy().to_string()) + .collect(); + + assert!(args.contains(&"--factory".to_string())); + assert!(args.contains(&"app.main:create_app".to_string())); + assert!(args.contains(&"--port".to_string())); + assert!(args.contains(&"8000".to_string())); + + let envs: Vec<(String, String)> = cmd + .get_envs() + .filter_map(|(k, v)| Some((k.to_string_lossy().to_string(), v?.to_string_lossy().to_string()))) + .collect(); + + let find = |key: &str| envs.iter().find(|(k, _)| k == key).map(|(_, v)| v.clone()); + assert_eq!(find("REFRAME_LOCAL_QUEUE_MODE"), Some("true".to_string())); + assert_eq!(find("BROKER_URL"), Some("memory://".to_string())); + assert_eq!(find("RESULT_BACKEND"), Some("cache+memory://".to_string())); + assert_eq!(find("REFRAME_MEDIA_ROOT"), Some(media_root.to_string_lossy().to_string())); + + let _ = fs::remove_dir_all(root); +} + +#[test] +fn local_runtime_status_reports_stopped_when_no_child() { + let status = local_runtime_status().expect("local runtime status"); + assert!(status.contains("api stopped")); + assert!(status.contains("queue mode: local")); +} + +#[test] +fn runtime_root_from_env_rejects_invalid_layout() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-runtime-root-env"); + env::set_var("REFRAME_DESKTOP_RUNTIME_ROOT", &root); + assert!(runtime_root_from_env().is_none()); + + 
write_file(&root.join("apps").join("api").join("app").join("main.py"), "pass\n"); + fs::create_dir_all( + root.join("packages") + .join("media-core") + .join("src") + .join("media_core"), + ) + .expect("failed to create media_core dir"); + + let resolved = runtime_root_from_env(); + assert_eq!(resolved, Some(root.clone())); + env::remove_var("REFRAME_DESKTOP_RUNTIME_ROOT"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn run_checked_handles_success_and_failure() { + let ok = if cfg!(target_os = "windows") { + let mut cmd = Command::new("cmd"); + cmd.args(["/C", "echo ok"]); + cmd + } else { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "echo ok"]); + cmd + }; + let output = run_checked(ok).expect("expected command success"); + assert!(output.contains("ok")); + + let bad = if cfg!(target_os = "windows") { + let mut cmd = Command::new("cmd"); + cmd.args(["/C", "exit 7"]); + cmd + } else { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "exit 7"]); + cmd + }; + let err = run_checked(bad).expect_err("expected non-zero command to fail"); + assert!(err.contains("exit")); +} + +#[test] +fn runtime_requirement_files_require_both_manifests() { + let root = unique_temp_dir("reframe-runtime-reqs"); + let missing = runtime_requirement_files(&root).expect_err("missing requirements should fail"); + assert!(missing.contains("requirements.txt")); + + write_file( + &root.join("apps").join("api").join("requirements.txt"), + "fastapi==0.0\n", + ); + let missing_worker = runtime_requirement_files(&root).expect_err("worker requirements should still be missing"); + assert!(missing_worker.contains("services")); + + write_file( + &root.join("services").join("worker").join("requirements.txt"), + "celery==0.0\n", + ); + let both = runtime_requirement_files(&root).expect("both requirement files should be discovered"); + assert!(both.0.is_file()); + assert!(both.1.is_file()); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn ensure_media_root_uses_desktop_data_dir() { 
+ let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-media-root"); + env::set_var("REFRAME_DESKTOP_APP_DATA", root.join("data")); + let media = ensure_media_root(&root).expect("media root creation should succeed"); + assert!(media.is_dir()); + assert!(media.ends_with("media")); + env::remove_var("REFRAME_DESKTOP_APP_DATA"); + let _ = fs::remove_dir_all(root); +} +#[test] +fn find_repo_root_detects_ancestor_layout() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-find-root"); + write_file(&root.join("apps").join("api").join("app").join("main.py"), "pass\n"); + fs::create_dir_all( + root.join("packages") + .join("media-core") + .join("src") + .join("media_core"), + ) + .expect("failed to create media_core dir"); + + let nested = root.join("apps").join("api"); + fs::create_dir_all(&nested).expect("nested dir create"); + + let previous = env::current_dir().expect("current dir"); + env::set_current_dir(&nested).expect("set current dir"); + let found = find_repo_root().expect("expected repo root from ancestor search"); + assert_eq!(found, root); + env::set_current_dir(previous).expect("restore current dir"); + + let _ = fs::remove_dir_all(found); +} + +#[test] +fn find_runtime_root_prefers_explicit_env_layout() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-runtime-env"); + write_file(&root.join("apps").join("api").join("app").join("main.py"), "pass\n"); + fs::create_dir_all( + root.join("packages") + .join("media-core") + .join("src") + .join("media_core"), + ) + .expect("failed to create media_core dir"); + + env::set_var("REFRAME_DESKTOP_RUNTIME_ROOT", &root); + let found = find_runtime_root().expect("runtime root from env"); + assert_eq!(found, root); + env::remove_var("REFRAME_DESKTOP_RUNTIME_ROOT"); + let _ = fs::remove_dir_all(found); +} + +#[test] +fn mark_runtime_ready_writes_marker_file() { + let root = unique_temp_dir("reframe-runtime-marker"); + let marker = root.join("ready.marker"); + 
mark_runtime_ready(&marker).expect("marker write should succeed"); + let payload = fs::read_to_string(&marker).expect("marker read"); + assert_eq!(payload.trim(), "ready"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn create_runtime_venv_if_missing_respects_existing_python_binary() { + let root = unique_temp_dir("reframe-existing-venv"); + let venv = root.join("venv"); + let python = venv.join(if cfg!(target_os = "windows") { + "Scripts/python.exe" + } else { + "bin/python" + }); + write_file(&python, ""); + + let host = if cfg!(target_os = "windows") { + PathBuf::from("cmd") + } else { + PathBuf::from("sh") + }; + + create_runtime_venv_if_missing(&host, &venv, &python).expect("existing python should short-circuit"); + assert!(python.is_file()); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn api_is_running_clears_finished_child_state() { + let mut state = RuntimeState::default(); + let child = if cfg!(target_os = "windows") { + let mut cmd = Command::new("cmd"); + cmd.args(["/C", "exit 0"]); + cmd.spawn().expect("spawn child") + } else { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "exit 0"]); + cmd.spawn().expect("spawn child") + }; + + state.api = Some(child); + + std::thread::sleep(std::time::Duration::from_millis(50)); + let running = api_is_running(&mut state).expect("api_is_running should succeed"); + assert!(!running); + assert!(state.api.is_none()); +} + +#[test] +fn api_is_running_reports_true_for_active_child() { + let mut state = RuntimeState::default(); + let child = if cfg!(target_os = "windows") { + let mut cmd = Command::new("cmd"); + cmd.args(["/C", "ping -n 3 127.0.0.1 >NUL"]); + cmd.spawn().expect("spawn child") + } else { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "sleep 1"]); + cmd.spawn().expect("spawn child") + }; + state.api = Some(child); + + let running = api_is_running(&mut state).expect("api_is_running should succeed"); + assert!(running); + let _ = stop_local_runtime(); +} + +#[test] +fn 
resolve_host_python_binary_handles_absolute_and_path_failure() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-resolve-python"); + let explicit = root.join("python-explicit"); + write_file(&explicit, "placeholder"); + + env::set_var("REFRAME_DESKTOP_PYTHON", &explicit); + let resolved = resolve_host_python_binary(&root).expect("explicit absolute python path"); + assert_eq!(resolved, explicit); + + env::set_var("REFRAME_DESKTOP_PYTHON", root.join("missing-python")); + let old_path = env::var("PATH").unwrap_or_default(); + env::set_var("PATH", ""); + let err = resolve_host_python_binary(&root).expect_err("missing python candidates should fail"); + assert!(err.contains("No usable Python runtime found")); + env::set_var("PATH", old_path); + env::remove_var("REFRAME_DESKTOP_PYTHON"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn desktop_data_dir_falls_back_when_env_blank() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-desktop-data-fallback"); + env::set_var("REFRAME_DESKTOP_APP_DATA", " "); + let data = desktop_data_dir(&root).expect("fallback desktop data dir"); + assert!(data.ends_with(".desktop-runtime")); + assert!(data.is_dir()); + env::remove_var("REFRAME_DESKTOP_APP_DATA"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn create_runtime_venv_if_missing_returns_spawn_error_for_missing_host_binary() { + let root = unique_temp_dir("reframe-venv-missing-host"); + let venv = root.join("venv"); + let python = venv_python(&venv); + let missing_host = root.join("missing-host-python"); + + let err = create_runtime_venv_if_missing(&missing_host, &venv, &python) + .expect_err("missing host python must fail"); + assert!(err.contains("Command failed to start")); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn install_runtime_requirements_reports_command_failure() { + let root = unique_temp_dir("reframe-install-runtime-req-fail"); + let missing_python = root.join("missing-python"); + let req_api = 
root.join("api-req.txt"); + let req_worker = root.join("worker-req.txt"); + write_file(&req_api, "fastapi\n"); + write_file(&req_worker, "celery\n"); + + let err = install_runtime_requirements(&missing_python, &req_api, &req_worker) + .expect_err("missing python binary should fail pip install"); + assert!(err.contains("Command failed to start")); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn command_wrappers_fail_closed_when_runtime_root_missing() { + let _env_guard = env_lock(); + let root = unique_temp_dir("reframe-command-wrapper-missing-root"); + let previous = env::current_dir().expect("current dir"); + env::set_current_dir(&root).expect("switch to isolated cwd"); + env::set_var("REFRAME_DESKTOP_RUNTIME_ROOT", root.join("missing-runtime")); + + let prep_err = runtime_prepare().expect_err("runtime_prepare must fail without runtime root"); + assert!(prep_err.contains("Could not locate runtime root")); + + let docker_err = docker_version().expect_err("docker_version wrapper must fail without runtime root"); + assert!(docker_err.contains("Could not locate runtime root")); + + let compose_path_err = compose_file_path().expect_err("compose_file_path must fail without runtime root"); + assert!(compose_path_err.contains("Could not locate runtime root")); + + let up_err = compose_up(Some(true)).expect_err("compose_up must fail without runtime root"); + assert!(up_err.contains("Could not locate runtime root")); + + let ps = compose_ps().expect("compose_ps fallback status"); + assert!(ps.contains("queue mode: local")); + let down = compose_down().expect("compose_down fallback status"); + assert!(down.contains("not running")); + + env::remove_var("REFRAME_DESKTOP_RUNTIME_ROOT"); + env::set_current_dir(previous).expect("restore current dir"); + let _ = fs::remove_dir_all(root); +} + +#[test] +fn stop_local_runtime_stops_active_child() { + let mut guard = runtime_state_guard().expect("runtime lock"); + let child = if cfg!(target_os = "windows") { + let mut cmd = 
Command::new("cmd"); + cmd.args(["/C", "ping -n 5 127.0.0.1 >NUL"]); + cmd.spawn().expect("spawn active child") + } else { + let mut cmd = Command::new("sh"); + cmd.args(["-c", "sleep 5"]); + cmd.spawn().expect("spawn active child") + }; + guard.api = Some(child); + drop(guard); + + let out = stop_local_runtime().expect("stop local runtime"); + assert!(out.contains("stopped")); +} diff --git a/apps/desktop/src-tauri/src/main.rs b/apps/desktop/src-tauri/src/main.rs index bea3c230..e0d529cd 100644 --- a/apps/desktop/src-tauri/src/main.rs +++ b/apps/desktop/src-tauri/src/main.rs @@ -1,6 +1,18 @@ // Prevents additional console window on Windows in release, DO NOT REMOVE!! #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] +#[cfg(not(test))] fn main() { desktop_lib::run() } + +#[cfg(test)] +fn main() {} + +#[cfg(test)] +mod tests { + #[test] + fn main_is_noop_under_tests() { + super::main(); + } +} \ No newline at end of file diff --git a/apps/desktop/src-tauri/tauri.conf.json b/apps/desktop/src-tauri/tauri.conf.json index acc4e93e..5e949fbc 100644 --- a/apps/desktop/src-tauri/tauri.conf.json +++ b/apps/desktop/src-tauri/tauri.conf.json @@ -32,6 +32,9 @@ "icons/128x128@2x.png", "icons/icon.icns", "icons/icon.ico" + ], + "resources": [ + "runtime" ] }, "plugins": { @@ -43,3 +46,4 @@ } } } + diff --git a/apps/desktop/src/main.test.ts b/apps/desktop/src/main.test.ts index e17ee345..a19b0111 100644 --- a/apps/desktop/src/main.test.ts +++ b/apps/desktop/src/main.test.ts @@ -23,12 +23,14 @@ vi.mock("@tauri-apps/plugin-opener", () => ({ openUrl: openUrlMock })); vi.mock("@tauri-apps/plugin-process", () => ({ relaunch: relaunchMock })); vi.mock("@tauri-apps/plugin-updater", () => ({ check: checkMock })); -const UI_URL = "http://localhost:5173"; +const UI_URL = "http://localhost:8000"; const RELEASES_URL = "https://github.com/Prekzursil/Reframe/releases"; +const DOCS_URL = "http://localhost:8000/docs"; const LATEST_JSON_URL = 
"https://github.com/Prekzursil/Reframe/releases/latest/download/latest.json"; const htmlFixture = ` + @@ -51,6 +53,9 @@ const htmlFixture = ` + + + `; type RuntimeState = { @@ -69,6 +74,7 @@ const state: RuntimeState = { invokeValues: { compose_file_path: "/tmp/compose.yml", docker_version: "Docker 28.3.3", + runtime_prepare: "runtime ready", compose_ps: "api up\nworker up", compose_up: "compose up ok", compose_down: "compose down ok", @@ -129,6 +135,7 @@ function resetState() { state.invokeValues = { compose_file_path: "/tmp/compose.yml", docker_version: "Docker 28.3.3", + runtime_prepare: "runtime ready", compose_ps: "api up\nworker up", compose_up: "compose up ok", compose_down: "compose down ok", @@ -244,6 +251,7 @@ describe("desktop main app", () => { expect(document.getElementById("compose-path")?.textContent).toBe("/tmp/compose.yml"); expect(document.getElementById("updater-manifest")?.textContent).toBe(LATEST_JSON_URL); + state.invokeValues.compose_ps = "api running (pid 42)\\nqueue mode: local"; await click("btn-open-ui"); await click("btn-latest-json"); await click("btn-releases"); @@ -251,6 +259,16 @@ describe("desktop main app", () => { expect(openUrlMock).toHaveBeenCalledWith(UI_URL); expect(openUrlMock).toHaveBeenCalledWith(LATEST_JSON_URL); expect(openUrlMock).toHaveBeenCalledWith(RELEASES_URL); + state.invokeValues.compose_ps = "api up\\nworker up"; + await appModule.__test.openProductExperience(); + expect(invokeMock).toHaveBeenCalledWith("compose_up", { build: true }); + state.invokeValues.compose_ps = ""; + await appModule.__test.openProductExperience(); + + state.invokeFailures.add("compose_ps"); + await appModule.__test.openProductExperience(); + expect(openUrlMock).toHaveBeenCalledWith(DOCS_URL); + expect(document.getElementById("log")?.textContent ?? 
"").toContain("Unable to prepare Studio launch"); }); it("runs start/stop commands and click handlers", async () => { @@ -276,6 +294,115 @@ describe("desktop main app", () => { expect(document.getElementById("log")?.textContent ?? "").toContain("compose_up failed"); expect(document.getElementById("log")?.textContent ?? "").toContain("compose_down failed"); }); + it("handles runtime_prepare success and prepare button wiring", async () => { + await appModule.__test.prepareRuntime(); + expect(document.getElementById("log")?.textContent ?? "").toContain("runtime ready"); + expect(document.getElementById("step-runtime")?.textContent).toBe("ready"); + + await click("btn-prepare"); + expect(invokeMock).toHaveBeenCalledWith("runtime_prepare"); + }); + + it("handles runtime_prepare Error failures", async () => { + state.invokeFailures.add("runtime_prepare"); + await appModule.__test.prepareRuntime(); + + expect(document.getElementById("step-runtime")?.textContent).toBe("failed"); + expect(document.getElementById("log")?.textContent ?? "").toContain("runtime_prepare failed"); + }); + + it("handles runtime_prepare blank output fallback message", async () => { + state.invokeValues.runtime_prepare = " "; + await appModule.__test.prepareRuntime(); + + expect(document.getElementById("log")?.textContent ?? "").toContain( + "Runtime dependencies ready.", + ); + expect(document.getElementById("step-runtime")?.textContent).toBe("ready"); + }); + + it("handles runtime_prepare string-error branch", async () => { + invokeMock.mockImplementation(async (command: string) => { + if (command === "runtime_prepare") { + throw "prepare string failure"; + } + return state.invokeValues[command] ?? ""; + }); + + await appModule.__test.prepareRuntime(); + expect(document.getElementById("step-runtime")?.textContent).toBe("failed"); + expect(document.getElementById("log")?.textContent ?? 
"").toContain("prepare string failure"); + }); + + it("covers non-Error and empty-output runtime branches", async () => { + const firstHandlers: Record string | never> = { + docker_version: () => { + throw "docker unavailable"; + }, + compose_ps: () => { + throw "status unavailable"; + }, + compose_up: () => " ", + compose_down: () => "", + }; + + invokeMock.mockImplementation(async (command: string) => { + const handler = firstHandlers[command]; + if (handler) { + return handler(); + } + return state.invokeValues[command] ?? ""; + }); + + await appModule.__test.refresh(); + expect(document.getElementById("docker-version")?.textContent).toBe("not available"); + expect(document.getElementById("status")?.textContent).toBe("status unavailable"); + + await appModule.__test.start(true); + await appModule.__test.stop(); + + const log = document.getElementById("log")?.textContent ?? ""; + expect(log).toContain("OK"); + + const secondHandlers: Record string | never> = { + compose_up: () => { + throw "runtime failed"; + }, + compose_down: () => { + throw "runtime failed"; + }, + }; + + invokeMock.mockImplementation(async (command: string) => { + const handler = secondHandlers[command]; + if (handler) { + return handler(); + } + return state.invokeValues[command] ?? ""; + }); + + await appModule.__test.start(true); + await appModule.__test.stop(); + expect(document.getElementById("log")?.textContent ?? "").toContain("runtime failed"); + }); + + it("handles updater started events with unknown content length", async () => { + checkMock.mockResolvedValueOnce({ + currentVersion: "0.1.8", + version: "0.1.9", + downloadAndInstall: async (onEvent: (evt: any) => void) => { + onEvent({ event: "Started", data: {} }); + onEvent({ event: "Finished", data: {} }); + }, + }); + state.confirmQueue.push(true); + + await appModule.__test.checkUpdates(); + + const log = document.getElementById("log")?.textContent ?? 
""; + expect(log).toContain("unknown bytes"); + expect(log).toContain("Download finished."); + }); it("falls back when refresh dependencies fail", async () => { state.invokeFailures.clear(); @@ -294,6 +421,44 @@ describe("desktop main app", () => { expect(document.getElementById("system-status")?.textContent).toContain("Diagnostics unavailable"); }); + it("handles falsey diagnostics payload branches", async () => { + state.invokeValues.compose_ps = " "; + state.invokeValues.docker_version = " runtime "; + getVersionMock.mockResolvedValueOnce(" "); + state.fetchQueue.push( + makeResponse(200, { + offline_mode: false, + storage_backend: null, + worker: { + ping_ok: false, + system_info: { + ffmpeg: { + present: false, + }, + }, + }, + }), + ); + + await appModule.__test.refresh(); + + expect(document.getElementById("app-version")?.textContent).toBe("unknown"); + expect(document.getElementById("worker-ping")?.textContent).toBe("no response"); + expect(document.getElementById("ffmpeg")?.textContent).toBe("missing"); + expect(document.getElementById("status")?.textContent).toContain("(no output)"); + expect(document.getElementById("step-worker")?.textContent).toBe("no response"); + }); + + it("tolerates missing onboarding chips when refreshing diagnostics", async () => { + document.getElementById("step-runtime")?.remove(); + document.getElementById("step-api")?.remove(); + document.getElementById("step-worker")?.remove(); + + await appModule.__test.refreshDiagnostics(); + + expect(document.getElementById("offline-mode")?.textContent).toBe("true"); + }); + it("handles updater paths: no-update, cancel, install, and failure", async () => { state.updateMode = "none"; await appModule.__test.checkUpdates(); @@ -366,6 +531,134 @@ describe("desktop main app", () => { await click("btn-copy-debug"); }); + + it("covers collectDebugInfo unknown fallbacks and empty-ui branches", async () => { + state.updateMode = "none"; + await appModule.__test.checkUpdates(); + await 
appModule.__test.refreshDiagnostics(); + + document.getElementById("status")!.textContent = " "; + document.getElementById("log")!.textContent = " "; + + getNameMock.mockResolvedValueOnce(" "); + getVersionMock.mockResolvedValueOnce(" "); + getTauriVersionMock.mockResolvedValueOnce(" "); + getIdentifierMock.mockResolvedValueOnce(" "); + state.invokeValues.compose_ps = " "; + + state.fetchQueue.push(makeResponse(503, { message: "not-ok" }, "Service Unavailable")); + + const debug = await appModule.__test.collectDebugInfo(); + expect(debug).toContain("app_name: unknown"); + expect(debug).toContain("app_version: unknown"); + expect(debug).toContain("tauri_version: unknown"); + expect(debug).toContain("identifier: unknown"); + expect(debug).toContain("compose_ps: (empty)"); + expect(debug).toContain("system_status_http: 503 Service Unavailable"); + expect(debug).not.toContain("last_updater_error:"); + expect(debug).not.toContain("last_diagnostics_error:"); + expect(debug).not.toContain("ui_compose_status:"); + expect(debug).not.toContain("ui_log:"); + }); + + it("covers refresh diagnostics nullish paths and string throw branches", async () => { + state.fetchQueue.push(makeResponse(200, { offline_mode: false, storage_backend: "local" })); + await appModule.__test.refreshDiagnostics(); + expect(document.getElementById("worker-ping")?.textContent).toBe("no response"); + expect(document.getElementById("ffmpeg")?.textContent).toBe("missing"); + + state.fetchQueue.push( + makeResponse(200, { + offline_mode: true, + storage_backend: "s3", + worker: { + ping_ok: true, + system_info: { + ffmpeg: { + present: true, + }, + }, + }, + }), + ); + await appModule.__test.refreshDiagnostics(); + expect(document.getElementById("ffmpeg")?.textContent).toBe("ok"); + + getVersionMock.mockImplementationOnce(async () => { + throw "version string error"; + }); + invokeMock.mockImplementation(async (command: string) => { + if (command === "compose_file_path") { + throw "compose path string 
error"; + } + return state.invokeValues[command] ?? ""; + }); + + document.getElementById("log")!.textContent = null; + await appModule.__test.refresh(); + const log = document.getElementById("log")?.textContent ?? ""; + expect(log).toContain("version string error"); + expect(log).toContain("compose path string error"); + }); + + it("covers updater callback branch for unknown events", async () => { + checkMock.mockResolvedValueOnce({ + currentVersion: "0.1.8", + version: "0.1.9", + downloadAndInstall: async (onEvent: (evt: any) => void) => { + onEvent({ event: "Started", data: { contentLength: 12 } }); + onEvent({ event: "Progress", data: { chunkLength: 12 } }); + onEvent({ event: "Custom", data: {} }); + onEvent({ event: "Finished", data: {} }); + }, + }); + state.confirmQueue.push(true); + + await appModule.__test.checkUpdates(); + + const log = document.getElementById("log")?.textContent ?? ""; + expect(log).toContain("Downloaded 12 bytes"); + expect(log).toContain("Download finished."); + }); + + + it("covers null textContent branches in append and debug collectors", async () => { + const logEl = document.getElementById("log") as HTMLPreElement; + const statusEl = document.getElementById("status") as HTMLPreElement; + + let logStore: string | null = null; + let statusStore: string | null = null; + + Object.defineProperty(logEl, "textContent", { + configurable: true, + get: () => logStore, + set: (value: string | null) => { + logStore = value; + }, + }); + + Object.defineProperty(statusEl, "textContent", { + configurable: true, + get: () => statusStore, + set: (value: string | null) => { + statusStore = value; + }, + }); + + await appModule.__test.start(true); + + logStore = null; + statusStore = null; + const debug = await appModule.__test.collectDebugInfo(); + expect(debug).not.toContain("ui_compose_status:"); + expect(debug).not.toContain("ui_log:"); + + delete (logEl as any).textContent; + delete (statusEl as any).textContent; + logEl.textContent = "Ready."; 
+    statusEl.textContent = "Loading…";
+  });
+
   it("throws for missing required DOM elements", () => {
     expect(() => appModule.__test.byId("does-not-exist")).toThrow("Missing element #does-not-exist");
   });
diff --git a/apps/desktop/src/main.ts b/apps/desktop/src/main.ts
index dac54a71..7fc249a8 100644
--- a/apps/desktop/src/main.ts
+++ b/apps/desktop/src/main.ts
@@ -11,9 +11,10 @@ import { relaunch } from "@tauri-apps/plugin-process";
 import { check } from "@tauri-apps/plugin-updater";
 import { errToString, truncate } from "./text";
 
-const UI_URL = "http://localhost:5173";
+const UI_URL = "http://localhost:8000";
 const API_URL = "http://localhost:8000/api/v1";
 const SYSTEM_STATUS_URL = `${API_URL}/system/status`;
+const DOCS_URL = "http://localhost:8000/docs";
 const RELEASES_URL = "https://github.com/Prekzursil/Reframe/releases";
 const UPDATER_MANIFEST_URL =
   "https://github.com/Prekzursil/Reframe/releases/latest/download/latest.json";
@@ -43,6 +44,13 @@ function setText(id: string, text: string) {
   byId(id).textContent = text;
 }
 
+function setTextIfPresent(id: string, text: string) {
+  const el = document.getElementById(id);
+  if (el) {
+    el.textContent = text;
+  }
+}
+
 async function collectDebugInfo(): Promise<string> {
   const lines: string[] = [];
 
@@ -53,6 +61,7 @@ async function collectDebugInfo(): Promise<string> {
   push("timestamp", new Date().toISOString());
   push("user_agent", navigator.userAgent);
   push("updater_manifest", UPDATER_MANIFEST_URL);
+  push("docs_url", DOCS_URL);
   push("releases_url", RELEASES_URL);
 
   try {
@@ -170,6 +179,9 @@ async function refreshDiagnostics() {
     ffmpeg?.present ? `ok${ffmpeg?.version ? ` (${ffmpeg.version})` : ""}` : "missing",
   );
   setText("system-status", JSON.stringify(data, null, 2));
+  setTextIfPresent("step-runtime", "ready");
+  setTextIfPresent("step-api", "reachable");
+  setTextIfPresent("step-worker", worker?.ping_ok ? 
"ready" : "no response"); lastDiagnosticsError = null; } catch (err) { const msg = errToString(err); @@ -178,6 +190,9 @@ async function refreshDiagnostics() { setText("worker-ping", "unknown"); setText("ffmpeg", "unknown"); setText("system-status", `Diagnostics unavailable.\n\n${msg}`); + setTextIfPresent("step-runtime", "check logs"); + setTextIfPresent("step-api", "unreachable"); + setTextIfPresent("step-worker", "unknown"); lastDiagnosticsError = msg; } } @@ -219,9 +234,21 @@ async function refresh() { await refreshDiagnostics(); } +async function prepareRuntime() { + appendLog("Preparing local runtime dependencies..."); + try { + const prep = await invoke("runtime_prepare"); + appendLog(prep.trim() || "Runtime dependencies ready."); + setTextIfPresent("step-runtime", "ready"); + } catch (err) { + appendLog(err instanceof Error ? err.message : String(err)); + setTextIfPresent("step-runtime", "failed"); + } +} async function start(build: boolean) { - appendLog(build ? "Starting stack (build)..." : "Starting stack (no build)..."); try { + await prepareRuntime(); + appendLog("Starting local runtime..."); const out = await invoke("compose_up", { build }); appendLog(out.trim() || "OK"); } catch (err) { @@ -232,7 +259,7 @@ async function start(build: boolean) { } async function stop() { - appendLog("Stopping stack..."); + appendLog("Stopping local runtime..."); try { const out = await invoke("compose_down"); appendLog(out.trim() || "OK"); @@ -243,6 +270,20 @@ async function stop() { } } +async function openProductExperience() { + try { + const runtimeStatus = await invoke("compose_ps"); + if (!String(runtimeStatus || "").toLowerCase().includes("running")) { + appendLog("Local runtime is not running. Starting now before opening Studio..."); + await start(true); + } + await openUrl(UI_URL); + return; + } catch (err) { + appendLog(`Unable to prepare Studio launch: ${errToString(err)}. 
Opening docs instead.`); + } + await openUrl(DOCS_URL); +} async function checkUpdates() { appendLog("Checking for updates..."); try { @@ -296,15 +337,18 @@ export const __test = { refresh, start, stop, + prepareRuntime, + openProductExperience, checkUpdates, }; window.addEventListener("DOMContentLoaded", () => { + document.getElementById("btn-prepare")?.addEventListener("click", () => void prepareRuntime()); byId("btn-up").addEventListener("click", () => start(true)); byId("btn-up-nobuild").addEventListener("click", () => start(false)); byId("btn-down").addEventListener("click", () => stop()); byId("btn-refresh").addEventListener("click", () => refresh()); - byId("btn-open-ui").addEventListener("click", () => openUrl(UI_URL)); + byId("btn-open-ui").addEventListener("click", () => void openProductExperience()); byId("btn-copy-debug").addEventListener("click", () => copyDebugInfo()); byId("btn-updates").addEventListener("click", () => checkUpdates()); byId("btn-latest-json").addEventListener("click", () => diff --git a/apps/desktop/src/styles.css b/apps/desktop/src/styles.css index b6f20d80..5b2578ee 100644 --- a/apps/desktop/src/styles.css +++ b/apps/desktop/src/styles.css @@ -1,63 +1,72 @@ :root { - font-family: Inter, Avenir, Helvetica, Arial, sans-serif; + --bg: #f3efe6; + --bg-accent: #e9dcc6; + --card: rgb(255 255 255 / 82%); + --card-border: rgb(25 29 38 / 12%); + --text: #1f2230; + --muted: #4f5d73; + --primary: #1d6de0; + --danger: #b32323; + --chip: #eef2fb; + --chip-border: rgb(29 109 224 / 22%); + + font-family: "Space Grotesk", "Segoe UI", "Trebuchet MS", sans-serif; font-size: 16px; - line-height: 24px; - font-weight: 400; - - color: #0f0f0f; - background-color: #f6f6f6; - - font-synthesis: none; + line-height: 1.45; + color: var(--text); text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; - -webkit-text-size-adjust: 100%; } -a { - font-weight: 500; - color: #646cff; - text-decoration: inherit; 
+* { + box-sizing: border-box; } -a:hover { - color: #535bf2; +body { + margin: 0; + min-height: 100vh; + background: + radial-gradient(circle at 8% 6%, rgb(29 109 224 / 18%), transparent 35%), + radial-gradient(circle at 92% 12%, rgb(198 127 40 / 22%), transparent 36%), + linear-gradient(180deg, var(--bg) 0%, var(--bg-accent) 100%); } .muted { - color: #5b5b5b; + color: var(--muted); } .mono { - font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + font-family: "Cascadia Mono", Consolas, "Courier New", monospace; } .container { - max-width: 920px; + max-width: 980px; margin: 0 auto; - padding: 28px 20px 48px; + padding: 28px 20px 46px; display: flex; flex-direction: column; gap: 16px; } -.header { - display: flex; - flex-direction: column; - gap: 6px; +.hero h1 { + margin: 0; + letter-spacing: -0.02em; + font-size: clamp(1.8rem, 1.4rem + 1.6vw, 2.5rem); } -.header h1 { - margin: 0; - font-size: 26px; - line-height: 1.2; +.hero p { + margin: 8px 0 0; + max-width: 72ch; } .card { - border: 1px solid rgba(0, 0, 0, 0.08); - background: rgba(255, 255, 255, 0.9); - border-radius: 14px; + border: 1px solid var(--card-border); + border-radius: 16px; padding: 16px; + background: var(--card); + backdrop-filter: blur(8px); + box-shadow: 0 12px 30px rgb(24 32 52 / 8%); display: flex; flex-direction: column; gap: 12px; @@ -65,7 +74,8 @@ a:hover { .card h2 { margin: 0; - font-size: 16px; + font-size: 1.05rem; + letter-spacing: 0.01em; } .row { @@ -88,14 +98,52 @@ a:hover { } .label { - font-size: 12px; - color: #6b7280; + font-size: 0.75rem; + letter-spacing: 0.04em; + text-transform: uppercase; + color: #64748b; +} + +.step-grid { + display: grid; + grid-template-columns: repeat(3, minmax(0, 1fr)); + gap: 12px; +} + +.step-card { + border: 1px solid rgb(29 109 224 / 18%); + border-radius: 12px; + background: rgb(255 255 255 / 62%); + padding: 10px; + display: flex; + flex-direction: column; + gap: 6px; +} + 
+.step-title { + font-weight: 700; + font-size: 0.92rem; +} + +.step-body { + min-height: 2.6em; + font-size: 0.87rem; +} + +.chip { + display: inline-flex; + align-self: flex-start; + border: 1px solid var(--chip-border); + border-radius: 999px; + background: var(--chip); + padding: 2px 10px; + font-size: 0.78rem; } .status, .log { - border: 1px solid rgba(0, 0, 0, 0.08); - background: rgba(0, 0, 0, 0.04); + border: 1px solid rgb(25 29 38 / 10%); + background: rgb(247 250 255 / 80%); border-radius: 12px; padding: 12px; overflow: auto; @@ -106,22 +154,20 @@ a:hover { button { border-radius: 10px; - border: 1px solid rgba(0, 0, 0, 0.12); + border: 1px solid rgb(25 29 38 / 15%); padding: 10px 12px; - font-size: 14px; - font-weight: 600; + font-size: 0.9rem; + font-weight: 700; font-family: inherit; - color: #0f0f0f; - background-color: #ffffff; - transition: - border-color 0.2s, - background-color 0.2s, - transform 0.02s; + color: var(--text); + background: #fff; + transition: border-color 0.16s ease, transform 0.06s ease, box-shadow 0.16s ease; cursor: pointer; } button:hover { - border-color: rgba(57, 108, 216, 0.8); + border-color: rgb(29 109 224 / 80%); + box-shadow: 0 4px 16px rgb(29 109 224 / 18%); } button:active { @@ -129,63 +175,81 @@ button:active { } button.secondary { - background: rgba(0, 0, 0, 0.02); + background: rgb(255 255 255 / 70%); } button.danger { - border-color: rgba(220, 38, 38, 0.45); - color: #b91c1c; + border-color: rgb(179 35 35 / 45%); + color: var(--danger); } button.danger:hover { - border-color: rgba(220, 38, 38, 0.8); + border-color: rgb(179 35 35 / 85%); + box-shadow: 0 4px 14px rgb(179 35 35 / 20%); } .footer { - font-size: 12px; + font-size: 0.78rem; } -@media (prefers-color-scheme: dark) { - :root { - color: #f6f6f6; - background-color: #121212; +@media (width <= 900px) { + .grid { + grid-template-columns: 1fr; } - .muted { - color: rgba(255, 255, 255, 0.7); + .step-grid { + grid-template-columns: 1fr; } +} - .card { - background: 
rgba(18, 18, 18, 0.6); - border-color: rgba(255, 255, 255, 0.12); +@media (prefers-color-scheme: dark) { + :root { + --bg: #10141e; + --bg-accent: #192334; + --card: rgb(16 20 30 / 82%); + --card-border: rgb(255 255 255 / 14%); + --text: #eef2ff; + --muted: #a9b4cf; + --chip: rgb(68 94 160 / 30%); + --chip-border: rgb(134 181 255 / 45%); } .status, .log { - border-color: rgba(255, 255, 255, 0.12); - background: rgba(255, 255, 255, 0.06); + border-color: rgb(255 255 255 / 15%); + background: rgb(2 6 14 / 64%); } button { - color: #ffffff; - background-color: rgba(255, 255, 255, 0.06); - border-color: rgba(255, 255, 255, 0.12); + color: var(--text); + background: rgb(255 255 255 / 5%); + border-color: rgb(255 255 255 / 18%); } button.secondary { - background: rgba(255, 255, 255, 0.04); + background: rgb(255 255 255 / 4%); } +} - button:hover { - border-color: rgba(125, 211, 252, 0.6); - } +details { + border: 1px solid rgb(25 29 38 / 12%); + border-radius: 12px; + padding: 10px; + background: rgb(255 255 255 / 55%); +} - button.danger { - border-color: rgba(248, 113, 113, 0.4); - color: rgba(248, 113, 113, 0.95); - } +details summary { + cursor: pointer; + user-select: none; +} - button.danger:hover { - border-color: rgba(248, 113, 113, 0.75); +.advanced-row { + margin-top: 10px; +} + +@media (prefers-color-scheme: dark) { + details { + border-color: rgb(255 255 255 / 18%); + background: rgb(255 255 255 / 4%); } } diff --git a/apps/desktop/vitest.config.ts b/apps/desktop/vitest.config.ts index 3de81536..78b2e978 100644 --- a/apps/desktop/vitest.config.ts +++ b/apps/desktop/vitest.config.ts @@ -13,7 +13,7 @@ export default defineConfig({ thresholds: { lines: 100, functions: 100, - branches: 0, + branches: 100, statements: 100, }, }, diff --git a/apps/web/src/App.accountSystem.test.tsx b/apps/web/src/App.accountSystem.test.tsx new file mode 100644 index 00000000..55ec1086 --- /dev/null +++ b/apps/web/src/App.accountSystem.test.tsx @@ -0,0 +1,302 @@ +import { render, 
screen, within } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const apiClientMock = vi.hoisted(() => ({ + baseUrl: "http://localhost:8000/api/v1", + accessToken: null as string | null, + setAccessToken: vi.fn(), + listJobs: vi.fn(), + getJob: vi.fn(), + getAsset: vi.fn(), + listAssets: vi.fn(), + createCaptionJob: vi.fn(), + createTranslateJob: vi.fn(), + createStyledSubtitleJob: vi.fn(), + createShortsJob: vi.fn(), + translateSubtitleAsset: vi.fn(), + mergeAv: vi.fn(), + createCutClipJob: vi.fn(), + getSystemStatus: vi.fn(), + getUsageSummary: vi.fn(), + getBudgetPolicy: vi.fn(), + updateBudgetPolicy: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + listProjectJobs: vi.fn(), + listProjectAssets: vi.fn(), + createProjectShareLinks: vi.fn(), + retryJob: vi.fn(), + register: vi.fn(), + login: vi.fn(), + logout: vi.fn(), + getMe: vi.fn(), + getOrgContext: vi.fn(), + createOrgInvite: vi.fn(), + listOrgInvites: vi.fn(), + revokeOrgInvite: vi.fn(), + updateOrgMemberRole: vi.fn(), + removeOrgMember: vi.fn(), + resolveOrgInvite: vi.fn(), + acceptOrgInvite: vi.fn(), + oauthStart: vi.fn(), + listBillingPlans: vi.fn(), + getBillingSubscription: vi.fn(), + getBillingUsageSummary: vi.fn(), + getBillingSeatUsage: vi.fn(), + updateBillingSeatLimit: vi.fn(), + initAssetUpload: vi.fn(), + completeAssetUpload: vi.fn(), + uploadAsset: vi.fn(), + getOrgSsoConfig: vi.fn(), + updateOrgSsoConfig: vi.fn(), + createScimToken: vi.fn(), + revokeScimToken: vi.fn(), + startOktaSso: vi.fn(), + listProjectMembers: vi.fn(), + addProjectMember: vi.fn(), + updateProjectMemberRole: vi.fn(), + removeProjectMember: vi.fn(), + listProjectComments: vi.fn(), + createProjectComment: vi.fn(), + deleteProjectComment: vi.fn(), + requestProjectApproval: vi.fn(), + approveProjectApproval: vi.fn(), + rejectProjectApproval: vi.fn(), + listProjectActivity: vi.fn(), + listPublishProviders: 
vi.fn(), + listPublishConnections: vi.fn(), + listPublishJobs: vi.fn(), + startPublishConnection: vi.fn(), + completePublishConnection: vi.fn(), + revokePublishConnection: vi.fn(), + createPublishJob: vi.fn(), + retryPublishJob: vi.fn(), + jobBundleUrl: (jobId: string) => `http://localhost:8000/api/v1/jobs/${jobId}/bundle`, + mediaUrl: (uri: string) => (uri.startsWith("http") ? uri : `http://localhost:8000${uri}`), +})); + +vi.mock("./api/client", () => ({ apiClient: apiClientMock })); + +import App from "./App"; + +beforeEach(() => { + vi.clearAllMocks(); + localStorage.removeItem("reframe_access_token"); + apiClientMock.accessToken = null; + + apiClientMock.listJobs.mockResolvedValue([]); + apiClientMock.listAssets.mockResolvedValue([]); + apiClientMock.listProjects.mockResolvedValue([]); + apiClientMock.getUsageSummary.mockResolvedValue({ + total_jobs: 0, + queued_jobs: 0, + running_jobs: 0, + completed_jobs: 0, + failed_jobs: 0, + cancelled_jobs: 0, + job_type_counts: {}, + output_assets_count: 0, + output_duration_seconds: 0, + generated_bytes: 0, + from_date: null, + to_date: null, + }); + apiClientMock.getSystemStatus.mockResolvedValue({ + api_version: "0.1.0", + offline_mode: false, + storage_backend: "LocalStorageBackend", + broker_url: "redis://localhost:6379/0", + result_backend: "redis://localhost:6379/0", + worker: { + ping_ok: true, + workers: ["worker@local"], + system_info: { ffmpeg: { present: true } }, + }, + }); + + apiClientMock.getMe.mockResolvedValue({ + user_id: "user-owner", + email: "owner@team.test", + display_name: "Owner", + org_id: "org-1", + org_name: "Team Org", + role: "owner", + }); + apiClientMock.getOrgContext.mockResolvedValue({ + org_id: "org-1", + org_name: "Team Org", + slug: "team-org", + role: "owner", + members: [ + { user_id: "user-owner", email: "owner@team.test", display_name: "Owner", role: "owner" }, + { user_id: "user-editor", email: "editor@team.test", display_name: "Editor", role: "editor" }, + ], + }); + 
apiClientMock.listOrgInvites.mockResolvedValue([ + { + id: "invite-1", + email: "new@team.test", + role: "viewer", + status: "pending", + invite_url: "http://localhost:5173/invites/accept?token=tok_123", + expires_at: "2030-01-01T00:00:00Z", + }, + ]); + + apiClientMock.oauthStart.mockResolvedValue({ authorize_url: "javascript:alert(1)" }); + apiClientMock.resolveOrgInvite.mockResolvedValue({ + invite_id: "invite-1", + org_id: "org-1", + org_name: "Team Org", + email: "new@team.test", + role: "viewer", + status: "pending", + expires_at: "2030-01-01T00:00:00Z", + }); + apiClientMock.acceptOrgInvite.mockResolvedValue({ access_token: "accepted-token", token_type: "bearer" }); + apiClientMock.login.mockResolvedValue({ access_token: "login-token", token_type: "bearer" }); + apiClientMock.register.mockResolvedValue({ access_token: "register-token", token_type: "bearer" }); + apiClientMock.logout.mockResolvedValue(undefined); + apiClientMock.updateOrgMemberRole.mockResolvedValue({ + user_id: "user-editor", + email: "editor@team.test", + display_name: "Editor", + role: "viewer", + }); + apiClientMock.removeOrgMember.mockResolvedValue(undefined); + apiClientMock.revokeOrgInvite.mockResolvedValue({ id: "invite-1", status: "revoked" }); + + apiClientMock.listBillingPlans.mockResolvedValue([ + { code: "pro", name: "Pro", max_concurrent_jobs: 3, monthly_job_minutes: 1200, monthly_storage_gb: 50, seat_limit: 5, overage_per_minute_cents: 2 }, + ]); + apiClientMock.getBillingSubscription.mockResolvedValue({ + org_id: "org-1", + plan_code: "pro", + status: "active", + stripe_customer_id: "cus_1", + stripe_subscription_id: "sub_1", + cancel_at_period_end: false, + }); + apiClientMock.getBillingUsageSummary.mockResolvedValue({ + org_id: "org-1", + plan_code: "pro", + used_job_minutes: 10, + quota_job_minutes: 1200, + used_storage_gb: 1, + quota_storage_gb: 50, + overage_job_minutes: 0, + estimated_overage_cents: 0, + }); + apiClientMock.getBillingSeatUsage.mockResolvedValue({ + 
org_id: "org-1", + plan_code: "pro", + active_members: 2, + pending_invites: 1, + seat_limit: 4, + available_seats: 1, + }); + apiClientMock.updateBillingSeatLimit.mockResolvedValue({ + org_id: "org-1", + plan_code: "pro", + active_members: 2, + pending_invites: 1, + seat_limit: 5, + available_seats: 2, + }); +}); + +describe("account, billing, and diagnostics paths", () => { + it("covers OAuth unsafe-redirect path", async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole("button", { name: "Account" })); + await user.click(screen.getByRole("button", { name: "Continue with Google" })); + expect(apiClientMock.oauthStart).toHaveBeenCalledWith("google"); + expect(await screen.findByText(/Unsafe OAuth redirect URL rejected\./i)).toBeInTheDocument(); + }); + + it("covers authenticated account management actions", async () => { + const user = userEvent.setup(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + + render(); + + await user.click(screen.getByRole("button", { name: "Account" })); + await user.click(await screen.findByRole("button", { name: "Refresh account" })); + expect(apiClientMock.getMe).toHaveBeenCalled(); + + const roleSelectors = await screen.findAllByDisplayValue("editor"); + await user.selectOptions(roleSelectors[0], "viewer"); + expect(apiClientMock.updateOrgMemberRole).toHaveBeenCalledWith("user-editor", { role: "viewer" }); + + await user.click(screen.getByRole("button", { name: "Revoke" })); + expect(apiClientMock.revokeOrgInvite).toHaveBeenCalledWith("invite-1"); + + const removeButtons = screen.getAllByRole("button", { name: "Remove" }); + await user.click(removeButtons[removeButtons.length - 1]); + expect(apiClientMock.removeOrgMember).toHaveBeenCalledWith("user-editor"); + + await user.click(screen.getByRole("button", { name: "Logout" })); + expect(apiClientMock.logout).toHaveBeenCalled(); + }, 20000); + + it("covers invite token acceptance and auth 
actions", async () => { + const user = userEvent.setup(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + window.history.pushState({}, "", "/?token=tok_accept"); + + render(); + + await user.click(screen.getByRole("button", { name: "Account" })); + expect(await screen.findByText(/Invite acceptance/i)).toBeInTheDocument(); + await user.click(await screen.findByRole("button", { name: "Accept invite" })); + expect(apiClientMock.acceptOrgInvite).toHaveBeenCalledWith({ token: "tok_accept" }); + + await user.click(screen.getByRole("button", { name: "Logout" })); + expect(apiClientMock.logout).toHaveBeenCalled(); + + localStorage.removeItem("reframe_access_token"); + apiClientMock.accessToken = null; + apiClientMock.oauthStart.mockRejectedValueOnce(new Error("oauth github failed")); + + await user.click(screen.getByRole("button", { name: "Continue with GitHub" })); + expect(await screen.findByText(/oauth github failed/i)).toBeInTheDocument(); + + await user.clear(screen.getByLabelText("Email")); + await user.type(screen.getByLabelText("Email"), "user@example.com"); + await user.clear(screen.getByLabelText("Password")); + await user.type(screen.getByLabelText("Password"), "pass12345"); + await user.click(screen.getByRole("button", { name: "Register" })); + expect(apiClientMock.register).toHaveBeenCalled(); + + await user.click(screen.getByRole("button", { name: "Logout" })); + await user.click(screen.getByRole("button", { name: "Login" })); + expect(apiClientMock.login).toHaveBeenCalled(); + }, 30000); + it("covers billing and system refresh flows", async () => { + const user = userEvent.setup(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + + render(); + + await user.click(screen.getByRole("button", { name: "Billing" })); + expect(await screen.findByText(/Billing status/i)).toBeInTheDocument(); + await user.clear(screen.getByLabelText("Seat limit")); + await 
user.type(screen.getByLabelText("Seat limit"), "5"); + await user.click(screen.getByRole("button", { name: "Update seat limit" })); + expect(apiClientMock.updateBillingSeatLimit).toHaveBeenCalledWith({ seat_limit: 5 }); + + const plansTable = await screen.findByRole("table"); + expect(within(plansTable).getByText("Pro")).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "System" })); + expect(await screen.findByText(/Ping: ok/i)).toBeInTheDocument(); + await user.click(screen.getByRole("button", { name: "Refresh" })); + expect(apiClientMock.getSystemStatus).toHaveBeenCalled(); + }, 20000); +}); diff --git a/apps/web/src/App.allTabs.test.tsx b/apps/web/src/App.allTabs.test.tsx new file mode 100644 index 00000000..b871198f --- /dev/null +++ b/apps/web/src/App.allTabs.test.tsx @@ -0,0 +1,233 @@ +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const apiClientMock = vi.hoisted(() => ({ + baseUrl: "http://localhost:8000/api/v1", + accessToken: "token" as string | null, + setAccessToken: vi.fn(), + listJobs: vi.fn(), + getJob: vi.fn(), + getAsset: vi.fn(), + listAssets: vi.fn(), + createCaptionJob: vi.fn(), + createTranslateJob: vi.fn(), + createStyledSubtitleJob: vi.fn(), + createShortsJob: vi.fn(), + translateSubtitleAsset: vi.fn(), + mergeAv: vi.fn(), + createCutClipJob: vi.fn(), + getSystemStatus: vi.fn(), + getUsageSummary: vi.fn(), + getUsageCosts: vi.fn(), + getBudgetPolicy: vi.fn(), + updateBudgetPolicy: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + listProjectJobs: vi.fn(), + listProjectAssets: vi.fn(), + createProjectShareLinks: vi.fn(), + listProjectMembers: vi.fn(), + addProjectMember: vi.fn(), + updateProjectMemberRole: vi.fn(), + removeProjectMember: vi.fn(), + listProjectComments: vi.fn(), + createProjectComment: vi.fn(), + deleteProjectComment: vi.fn(), + requestProjectApproval: 
vi.fn(), + approveProjectApproval: vi.fn(), + rejectProjectApproval: vi.fn(), + listProjectActivity: vi.fn(), + retryJob: vi.fn(), + register: vi.fn(), + login: vi.fn(), + logout: vi.fn(), + getMe: vi.fn(), + getOrgContext: vi.fn(), + createOrgInvite: vi.fn(), + listOrgInvites: vi.fn(), + revokeOrgInvite: vi.fn(), + updateOrgMemberRole: vi.fn(), + removeOrgMember: vi.fn(), + oauthStart: vi.fn(), + listBillingPlans: vi.fn(), + getBillingSubscription: vi.fn(), + getBillingUsageSummary: vi.fn(), + getBillingSeatUsage: vi.fn(), + updateBillingSeatLimit: vi.fn(), + initAssetUpload: vi.fn(), + completeAssetUpload: vi.fn(), + uploadAsset: vi.fn(), + getOrgSsoConfig: vi.fn(), + updateOrgSsoConfig: vi.fn(), + createScimToken: vi.fn(), + revokeScimToken: vi.fn(), + startOktaSso: vi.fn(), + listPublishProviders: vi.fn(), + listPublishConnections: vi.fn(), + listPublishJobs: vi.fn(), + startPublishConnection: vi.fn(), + completePublishConnection: vi.fn(), + revokePublishConnection: vi.fn(), + createPublishJob: vi.fn(), + retryPublishJob: vi.fn(), + jobBundleUrl: (jobId: string) => `http://localhost:8000/api/v1/jobs/${jobId}/bundle`, + mediaUrl: (uri: string) => (uri.startsWith("http") ? 
uri : `http://localhost:8000${uri}`), +})); + +vi.mock("./api/client", () => ({ apiClient: apiClientMock })); + +import App from "./App"; + +beforeEach(() => { + vi.clearAllMocks(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + + apiClientMock.listJobs.mockResolvedValue([]); + apiClientMock.listAssets.mockResolvedValue([]); + apiClientMock.getSystemStatus.mockResolvedValue({ + api_version: "0.1.0", + offline_mode: false, + storage_backend: "LocalStorageBackend", + broker_url: "memory://", + result_backend: "cache+memory://", + worker: { ping_ok: true, workers: ["local-queue"], system_info: { ffmpeg: { present: true, version: "6.1" } } }, + }); + apiClientMock.getUsageSummary.mockResolvedValue({ + total_jobs: 0, + queued_jobs: 0, + running_jobs: 0, + completed_jobs: 0, + failed_jobs: 0, + cancelled_jobs: 0, + job_type_counts: {}, + output_assets_count: 0, + output_duration_seconds: 0, + generated_bytes: 0, + }); + apiClientMock.getUsageCosts.mockResolvedValue({ + currency: "USD", + total_estimated_cost_cents: 0, + entries_count: 0, + by_metric: {}, + by_metric_cost_cents: {}, + }); + apiClientMock.getBudgetPolicy.mockResolvedValue({ + org_id: "org-1", + monthly_soft_limit_cents: null, + monthly_hard_limit_cents: null, + enforce_hard_limit: false, + current_month_estimated_cost_cents: 0, + projected_status: "on_track", + }); + + apiClientMock.getMe.mockResolvedValue({ + user_id: "user-1", + email: "owner@team.test", + display_name: "Owner", + org_id: "org-1", + org_name: "Team Org", + role: "owner", + }); + apiClientMock.getOrgContext.mockResolvedValue({ + org_id: "org-1", + org_name: "Team Org", + slug: "team-org", + role: "owner", + members: [{ user_id: "user-1", email: "owner@team.test", display_name: "Owner", role: "owner" }], + }); + apiClientMock.listOrgInvites.mockResolvedValue([]); + + apiClientMock.getOrgSsoConfig.mockResolvedValue({ + org_id: "org-1", + provider: "okta", + enabled: false, + issuer_url: 
"https://example.okta.com/oauth2/default", + client_id: "okta-client", + audience: "api://default", + default_role: "viewer", + jit_enabled: true, + allow_email_link: true, + config: {}, + }); + + apiClientMock.listProjects.mockResolvedValue([{ id: "proj-1", name: "Launch", description: "release" }]); + apiClientMock.listProjectJobs.mockResolvedValue([]); + apiClientMock.listProjectAssets.mockResolvedValue([{ id: "asset-1", kind: "video", uri: "/media/tmp/clip.mp4", mime_type: "video/mp4" }]); + apiClientMock.listProjectMembers.mockResolvedValue([]); + apiClientMock.listProjectComments.mockResolvedValue([]); + apiClientMock.listProjectActivity.mockResolvedValue([]); + + apiClientMock.listPublishProviders.mockResolvedValue([{ provider: "youtube", display_name: "YouTube", connected_count: 0 }]); + apiClientMock.listPublishConnections.mockResolvedValue([]); + apiClientMock.listPublishJobs.mockResolvedValue([]); + + apiClientMock.listBillingPlans.mockResolvedValue([{ code: "starter", name: "Starter", monthly_price_cents: 0 }]); + apiClientMock.getBillingSubscription.mockResolvedValue({ plan_code: "starter", status: "active", seat_limit: 1 }); + apiClientMock.getBillingUsageSummary.mockResolvedValue({ + period_start: "2026-03-01", + period_end: "2026-03-31", + quota_job_minutes: 100, + used_job_minutes: 0, + overage_job_minutes: 0, + used_storage_gb: 0, + quota_storage_gb: 5, + estimated_overage_cents: 0, + estimated_cost_cents: 0, + }); + apiClientMock.getBillingSeatUsage.mockResolvedValue({ + seat_limit: 1, + active_members: 1, + available_seats: 0, + pending_invites: 0, + }); + + apiClientMock.uploadAsset.mockResolvedValue({ id: "asset-upload", kind: "video", uri: "/media/tmp/upload.mp4", mime_type: "video/mp4" }); + apiClientMock.createCaptionJob.mockResolvedValue({ id: "job-caption", job_type: "captions", status: "queued", progress: 0, payload: {} }); + apiClientMock.createTranslateJob.mockResolvedValue({ id: "job-translate", job_type: "translate", status: 
"queued", progress: 0, payload: {} }); + apiClientMock.createShortsJob.mockResolvedValue({ id: "job-shorts", job_type: "shorts", status: "queued", progress: 0, payload: {} }); + apiClientMock.translateSubtitleAsset.mockResolvedValue({ id: "job-translate-asset", job_type: "translate_subtitle", status: "queued", progress: 0, payload: {} }); + apiClientMock.mergeAv.mockResolvedValue({ id: "job-merge", job_type: "merge", status: "queued", progress: 0, payload: {} }); + apiClientMock.createStyledSubtitleJob.mockResolvedValue({ id: "job-style", job_type: "style", status: "queued", progress: 0, payload: {} }); +}); + +describe("app tab coverage smoke", () => { + it("navigates every major tab and renders key product surfaces", async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole("button", { name: "Shorts" })); + expect(await screen.findByText(/Upload or link video/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Captions" })); + expect(await screen.findByText(/Captions & Translate/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Subtitles" })); + expect(await screen.findByText(/Subtitle editor/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Utilities" })); + expect(await screen.findByText(/Merge audio\/video/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Jobs" })); + expect(await screen.findByText(/Recent jobs/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Usage" })); + expect(await screen.findByText(/Usage summary/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Projects" })); + expect(await screen.findByRole("heading", { name: "Projects" })).toBeInTheDocument(); + expect(await screen.findByText(/Publish automation/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Account" })); + expect(await 
screen.findByText(/Account session/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Billing" })); + expect(await screen.findByText(/Billing status/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "System" })); + expect(await screen.findByText(/System health/i)).toBeInTheDocument(); + }, 20000); +}); + + diff --git a/apps/web/src/App.components.test.tsx b/apps/web/src/App.components.test.tsx new file mode 100644 index 00000000..86f6896a --- /dev/null +++ b/apps/web/src/App.components.test.tsx @@ -0,0 +1,278 @@ +import { fireEvent, render, screen, waitFor } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const apiClientMock = vi.hoisted(() => ({ + baseUrl: "http://localhost:8000/api/v1", + fetcher: vi.fn(), + createCaptionJob: vi.fn(), + createTranslateJob: vi.fn(), + uploadAsset: vi.fn(), + translateSubtitleAsset: vi.fn(), + mergeAv: vi.fn(), + createShortsJob: vi.fn(), + mediaUrl: (uri: string) => (uri.startsWith("http") ? 
uri : `http://localhost:8000${uri}`), +})); + +vi.mock("./api/client", () => ({ apiClient: apiClientMock })); + +import { + AudioUploadPanel, + CaptionsForm, + copyToClipboard, + CopyCommandButton, + JobStatusPill, + MergeAvForm, + ShortsForm, + StyleEditor, + SubtitleEditorCard, + SubtitleToolsForm, + SubtitleUpload, + TextPreview, + TranslateForm, + UploadPanel, +} from "./App"; + +function makeJob(id: string, jobType = "captions") { + return { + id, + job_type: jobType, + status: "queued", + progress: 0, + payload: {}, + }; +} + +describe("App component coverage", () => { + beforeEach(() => { + vi.clearAllMocks(); + apiClientMock.createCaptionJob.mockResolvedValue(makeJob("job-cap", "captions")); + apiClientMock.createTranslateJob.mockResolvedValue(makeJob("job-tr", "translate")); + apiClientMock.translateSubtitleAsset.mockResolvedValue(makeJob("job-sub", "subtitle_translate")); + apiClientMock.mergeAv.mockResolvedValue(makeJob("job-merge", "merge_av")); + apiClientMock.createShortsJob.mockResolvedValue(makeJob("job-shorts", "shorts")); + apiClientMock.uploadAsset.mockResolvedValue({ id: "asset-1", uri: "/media/asset-1.srt" }); + apiClientMock.fetcher.mockResolvedValue({ + ok: true, + text: async () => "1\n00:00:00,000 --> 00:00:01,000\nhello\n", + }); + vi.spyOn(navigator.clipboard, "writeText").mockResolvedValue(undefined); + vi.spyOn(globalThis, "fetch").mockResolvedValue({ + ok: true, + text: async () => "preview content", + } as Response); + }); + + it("covers clipboard helper success and fallback paths", async () => { + expect(await copyToClipboard("hello")).toBe(true); + + vi.spyOn(navigator.clipboard, "writeText").mockRejectedValueOnce(new Error("denied")); + const execSpy = vi.fn(() => true); + Object.defineProperty(document, "execCommand", { value: execSpy, configurable: true }); + + expect(await copyToClipboard("fallback")).toBe(true); + expect(execSpy).toHaveBeenCalledWith("copy"); + }); + + it("renders copy button and text preview states", async () 
=> { + const user = userEvent.setup(); + render(); + await user.click(screen.getByRole("button", { name: "Copy" })); + expect(await screen.findByRole("button", { name: "Copied" })).toBeInTheDocument(); + + render(); + expect(await screen.findByText("preview content")).toBeInTheDocument(); + + render(); + expect(await screen.findByText("Unsafe preview URL")).toBeInTheDocument(); + }); + + it("renders all job status pills", () => { + const statuses = ["queued", "running", "completed", "failed", "cancelled"] as const; + statuses.forEach((status) => { + render(); + expect(screen.getByText(status)).toBeInTheDocument(); + }); + }); + + it("submits caption form with advanced diarization variants and handles errors", async () => { + const user = userEvent.setup(); + const onCreated = vi.fn(); + render(); + + await user.selectOptions(screen.getByLabelText("Backend"), "noop"); + expect(screen.getByText(/No transcription runs/)).toBeInTheDocument(); + + await user.selectOptions(screen.getByLabelText("Speaker labels"), "pyannote"); + expect(screen.getByText(/HF_TOKEN/)).toBeInTheDocument(); + + await user.selectOptions(screen.getByLabelText("Speaker labels"), "speechbrain"); + expect(screen.getByText(/SpeechBrain/)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Create caption job" })); + + expect(apiClientMock.createCaptionJob).toHaveBeenCalled(); + expect(onCreated).toHaveBeenCalled(); + + apiClientMock.createCaptionJob.mockRejectedValueOnce(new Error("caption fail")); + await user.click(screen.getByRole("button", { name: "Create caption job" })); + expect(await screen.findByText("caption fail")).toBeInTheDocument(); + }); + + it("submits translate form success and failure", async () => { + const user = userEvent.setup(); + const onCreated = vi.fn(); + render(); + + await user.type(screen.getByLabelText("Subtitle asset ID"), "sub-1"); + await user.clear(screen.getByLabelText("Target language")); + await user.type(screen.getByLabelText("Target 
language"), "fr"); + await user.type(screen.getByLabelText("Notes / instructions"), "note"); + await user.click(screen.getByRole("button", { name: "Request translation" })); + + expect(apiClientMock.createTranslateJob).toHaveBeenCalled(); + expect(onCreated).toHaveBeenCalled(); + + apiClientMock.createTranslateJob.mockRejectedValueOnce(new Error("translate fail")); + await user.click(screen.getByRole("button", { name: "Request translation" })); + expect(await screen.findByText("translate fail")).toBeInTheDocument(); + }); + + it("handles upload panel video/audio/subtitle success and errors", async () => { + const onAssetId = vi.fn(); + const onPreview = vi.fn(); + + const { container: videoContainer } = render(); + const videoInput = videoContainer.querySelector('input[type="file"]') as HTMLInputElement; + const videoFile = new File(["video"], "clip.mp4", { type: "video/mp4" }); + fireEvent.change(videoInput, { target: { files: [videoFile] } }); + + await waitFor(() => expect(apiClientMock.uploadAsset).toHaveBeenCalledWith(videoFile, "video", "proj")); + expect(onAssetId).toHaveBeenCalledWith("asset-1"); + + apiClientMock.uploadAsset.mockRejectedValueOnce(new Error("upload fail")); + fireEvent.change(videoInput, { target: { files: [videoFile] } }); + expect(await screen.findByText("upload fail")).toBeInTheDocument(); + + const { container: audioContainer } = render(); + const audioInput = audioContainer.querySelector('input[type="file"]') as HTMLInputElement; + const audioFile = new File(["audio"], "track.mp3", { type: "audio/mpeg" }); + fireEvent.change(audioInput, { target: { files: [audioFile] } }); + await waitFor(() => expect(apiClientMock.uploadAsset).toHaveBeenCalledWith(audioFile, "audio", "proj")); + + const subtitlePreview = vi.fn(); + const { container: subtitleContainer } = render( + , + ); + const subtitleInput = subtitleContainer.querySelector('input[type="file"]') as HTMLInputElement; + const subtitleFile = new File(["1\n00:00:00,000 --> 
00:00:01,000\nhi"], "sub.srt", { type: "text/plain" }); + fireEvent.change(subtitleInput, { target: { files: [subtitleFile] } }); + await waitFor(() => expect(apiClientMock.uploadAsset).toHaveBeenCalledWith(subtitleFile, "subtitle", "proj")); + expect(subtitlePreview).toHaveBeenCalled(); + }); + + it("covers subtitle editor load/shift/cues/save flows", async () => { + const user = userEvent.setup(); + const onAssetChosen = vi.fn(); + + render(); + + await user.click(screen.getByRole("button", { name: "Load" })); + expect(await screen.findByDisplayValue(/hello/)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Cue table" })); + expect(await screen.findByText(/Cue table mode/)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Add cue" })); + await user.click(screen.getByRole("button", { name: "Sort cues" })); + + await user.click(screen.getAllByRole("button", { name: "Remove" })[0]!); + await user.click(screen.getByText("Raw text").closest("button") as HTMLButtonElement); + + await user.clear(screen.getByLabelText("Shift timings (seconds)")); + await user.type(screen.getByLabelText("Shift timings (seconds)"), "1.5"); + await user.click(screen.getByRole("button", { name: "Apply shift" })); + + await user.click(screen.getByRole("button", { name: "Save as new subtitle asset" })); + expect(onAssetChosen).toHaveBeenCalled(); + expect(await screen.findByText("Saved subtitle asset")).toBeInTheDocument(); + + apiClientMock.fetcher.mockResolvedValueOnce({ + ok: false, + statusText: "bad", + text: async () => "download fail", + }); + await user.click(screen.getByRole("button", { name: "Load" })); + expect(await screen.findByText("download fail")).toBeInTheDocument(); + }); + + it("submits subtitle tools, merge, shorts, and style actions with error handling", async () => { + const user = userEvent.setup(); + + const subtitleCreated = vi.fn(); + render(); + await user.type(screen.getByLabelText("Subtitle asset ID"), 
"sub-200"); + await user.selectOptions(screen.getByLabelText("Target language"), "de"); + await user.click(screen.getAllByRole("checkbox")[0]!); + await user.click(screen.getByRole("button", { name: "Translate subtitles" })); + expect(apiClientMock.translateSubtitleAsset).toHaveBeenCalled(); + expect(subtitleCreated).toHaveBeenCalled(); + + apiClientMock.translateSubtitleAsset.mockRejectedValueOnce(new Error("subtitle translate fail")); + await user.click(screen.getByRole("button", { name: "Translate subtitles" })); + expect(await screen.findByText("subtitle translate fail")).toBeInTheDocument(); + + const mergeCreated = vi.fn(); + render(); + await user.click(screen.getByRole("button", { name: "Merge audio/video" })); + expect(apiClientMock.mergeAv).toHaveBeenCalled(); + expect(mergeCreated).toHaveBeenCalled(); + + apiClientMock.mergeAv.mockRejectedValueOnce(new Error("merge fail")); + await user.click(screen.getByRole("button", { name: "Merge audio/video" })); + expect(await screen.findByText("merge fail")).toBeInTheDocument(); + + const shortsCreated = vi.fn(); + render(); + await user.type(screen.getByLabelText("Video asset ID or URL"), "vid-33"); + await user.click(screen.getByRole("checkbox", { name: /Attach styled subtitles/i })); + await user.click(screen.getByRole("checkbox", { name: /Prefer non-silent segments \(experimental\)/i })); + await user.type(screen.getByLabelText(/Timed subtitle asset \(SRT\/VTT\)/i), "sub-300"); + await user.click(screen.getByRole("checkbox", { name: /Use Groq \(requires GROQ_API_KEY on the worker\)/i })); + await user.type(screen.getByLabelText("Prompt to guide selection"), "highlight energetic moments"); + await user.click(screen.getByRole("button", { name: "Create shorts job" })); + expect(apiClientMock.createShortsJob).toHaveBeenCalled(); + expect(shortsCreated).toHaveBeenCalled(); + + apiClientMock.createShortsJob.mockRejectedValueOnce(new Error("shorts fail")); + await user.click(screen.getByRole("button", { name: "Create 
shorts job" })); + expect(await screen.findByText("shorts fail")).toBeInTheDocument(); + + const previewSpy = vi.fn().mockResolvedValue(makeJob("preview", "style")); + const renderSpy = vi.fn().mockResolvedValue(makeJob("render", "style")); + const onJobCreated = vi.fn(); + render( + , + ); + + await user.click(screen.getByRole("button", { name: "Preview 5s" })); + await user.click(screen.getByRole("button", { name: "Render full video" })); + expect(previewSpy).toHaveBeenCalled(); + expect(renderSpy).toHaveBeenCalled(); + expect(onJobCreated).toHaveBeenCalled(); + + const failedPreview = vi.fn().mockRejectedValue(new Error("preview fail")); + render(); + await user.click(screen.getAllByRole("button", { name: "Preview 5s" }).at(-1)!); + expect(await screen.findByText("preview fail")).toBeInTheDocument(); + }); +}); + + + diff --git a/apps/web/src/App.enterpriseAutomation.test.tsx b/apps/web/src/App.enterpriseAutomation.test.tsx index 288baa0b..7e843d43 100644 --- a/apps/web/src/App.enterpriseAutomation.test.tsx +++ b/apps/web/src/App.enterpriseAutomation.test.tsx @@ -239,6 +239,17 @@ describe("enterprise automation surfaces", () => { await user.click(screen.getByRole("button", { name: "Create SCIM token" })); expect(apiClientMock.createScimToken).toHaveBeenCalledWith("org-1"); expect(await screen.findByText(/rscim_secret_once/)).toBeInTheDocument(); + + apiClientMock.revokeScimToken.mockResolvedValueOnce({ + id: "scim-token-1", + org_id: "org-1", + token_hint: "rscim_12...ab", + scopes: ["users:read", "users:write"], + created_at: "2030-01-01T00:00:00Z", + revoked_at: "2030-01-01T01:00:00Z", + }); + await user.click(screen.getByRole("button", { name: "Revoke" })); + expect(apiClientMock.revokeScimToken).toHaveBeenCalledWith("org-1", "scim-token-1"); }); it("adds collaboration member and creates publish job from projects tab", async () => { @@ -268,4 +279,98 @@ describe("enterprise automation surfaces", () => { }), ); }, 15000); + it("covers share links, 
collaboration resolution, and publish retry flows", async () => { + const user = userEvent.setup(); + + apiClientMock.listProjectAssets.mockResolvedValueOnce([ + { id: "asset-1", kind: "video", uri: "/media/tmp/clip.mp4", mime_type: "video/mp4" }, + { id: "asset-2", kind: "audio", uri: "/media/tmp/clip.mp3", mime_type: "audio/mpeg" }, + ]); + apiClientMock.createProjectShareLinks.mockResolvedValueOnce([ + { project_id: "proj-1", asset_id: "asset-1", url: "https://example.com/share/asset-1", expires_at: "2030-01-02T00:00:00Z" }, + { project_id: "proj-1", asset_id: "asset-2", url: "javascript:alert(1)", expires_at: "2030-01-02T00:00:00Z" }, + ]); + apiClientMock.listProjectComments.mockResolvedValue([ + { + id: "comment-1", + project_id: "proj-1", + author_user_id: "user-owner", + author_email: "owner@team.test", + body: "Needs tweaks", + created_at: "2030-01-01T00:00:00Z", + updated_at: "2030-01-01T00:00:00Z", + }, + ]); + apiClientMock.listProjectActivity.mockResolvedValue([ + { + id: "evt-1", + project_id: "proj-1", + actor_user_id: "user-owner", + event_type: "project.approval_requested", + payload: { approval_id: "approval-1", summary: "Ship review", requested_by_user_id: "user-owner" }, + created_at: "2030-01-01T00:00:00Z", + }, + ]); + apiClientMock.approveProjectApproval.mockResolvedValue({ + id: "approval-1", + project_id: "proj-1", + status: "approved", + summary: "Ship review", + requested_by_user_id: "user-owner", + resolved_by_user_id: "user-owner", + resolved_at: "2030-01-01T01:00:00Z", + created_at: "2030-01-01T00:00:00Z", + updated_at: "2030-01-01T01:00:00Z", + }); + apiClientMock.deleteProjectComment.mockResolvedValue(undefined); + apiClientMock.revokePublishConnection.mockResolvedValue(undefined); + apiClientMock.listPublishJobs.mockResolvedValue([ + { + id: "publish-job-failed", + provider: "youtube", + connection_id: "conn-1", + asset_id: "asset-1", + status: "failed", + retry_count: 1, + payload: {}, + published_url: 
"https://youtube.com/watch?v=abc", + created_at: "2030-01-01T00:00:00Z", + updated_at: "2030-01-01T00:00:00Z", + }, + ]); + apiClientMock.retryPublishJob.mockResolvedValue({ + id: "publish-job-failed", + provider: "youtube", + connection_id: "conn-1", + asset_id: "asset-1", + status: "queued", + retry_count: 2, + payload: {}, + created_at: "2030-01-01T00:00:00Z", + updated_at: "2030-01-01T00:02:00Z", + }); + + render(); + + await user.click(screen.getByRole("button", { name: "Projects" })); + + await user.click(await screen.findByRole("button", { name: "Select filtered" })); + await user.click(screen.getByRole("button", { name: /Generate share links/i })); + expect(apiClientMock.createProjectShareLinks).toHaveBeenCalled(); + expect(await screen.findByText("https://example.com/share/asset-1")).toBeInTheDocument(); + expect(await screen.findByText(/Generated link was rejected by URL policy\./i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Delete" })); + expect(apiClientMock.deleteProjectComment).toHaveBeenCalledWith("proj-1", "comment-1"); + + await user.click(screen.getByRole("button", { name: "Approve" })); + expect(apiClientMock.approveProjectApproval).toHaveBeenCalledWith("proj-1", "approval-1"); + + await user.click(screen.getByRole("button", { name: "Use" })); + await user.click(screen.getByRole("button", { name: "Revoke" })); + expect(apiClientMock.revokePublishConnection).toHaveBeenCalledWith("youtube", "conn-1"); + + await user.click(screen.getByRole("button", { name: "Retry" })); + expect(apiClientMock.retryPublishJob).toHaveBeenCalledWith("publish-job-failed"); + }, 30000); }); diff --git a/apps/web/src/App.failureMatrix.test.tsx b/apps/web/src/App.failureMatrix.test.tsx new file mode 100644 index 00000000..4af07f05 --- /dev/null +++ b/apps/web/src/App.failureMatrix.test.tsx @@ -0,0 +1,284 @@ +import { render, screen, within } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { 
beforeEach, describe, expect, it, vi } from "vitest"; + +const apiClientMock = vi.hoisted(() => ({ + baseUrl: "http://localhost:8000/api/v1", + accessToken: null as string | null, + setAccessToken: vi.fn(), + listJobs: vi.fn(), + getJob: vi.fn(), + getAsset: vi.fn(), + listAssets: vi.fn(), + createCaptionJob: vi.fn(), + createTranslateJob: vi.fn(), + createStyledSubtitleJob: vi.fn(), + createShortsJob: vi.fn(), + translateSubtitleAsset: vi.fn(), + mergeAv: vi.fn(), + createCutClipJob: vi.fn(), + getSystemStatus: vi.fn(), + getUsageSummary: vi.fn(), + getUsageCosts: vi.fn(), + getBudgetPolicy: vi.fn(), + updateBudgetPolicy: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + listProjectJobs: vi.fn(), + listProjectAssets: vi.fn(), + createProjectShareLinks: vi.fn(), + listProjectMembers: vi.fn(), + addProjectMember: vi.fn(), + updateProjectMemberRole: vi.fn(), + removeProjectMember: vi.fn(), + listProjectComments: vi.fn(), + createProjectComment: vi.fn(), + deleteProjectComment: vi.fn(), + requestProjectApproval: vi.fn(), + approveProjectApproval: vi.fn(), + rejectProjectApproval: vi.fn(), + listProjectActivity: vi.fn(), + listPublishProviders: vi.fn(), + listPublishConnections: vi.fn(), + listPublishJobs: vi.fn(), + startPublishConnection: vi.fn(), + completePublishConnection: vi.fn(), + revokePublishConnection: vi.fn(), + createPublishJob: vi.fn(), + retryPublishJob: vi.fn(), + deleteJob: vi.fn(), + retryJob: vi.fn(), + register: vi.fn(), + login: vi.fn(), + logout: vi.fn(), + getMe: vi.fn(), + getOrgContext: vi.fn(), + createOrgInvite: vi.fn(), + listOrgInvites: vi.fn(), + revokeOrgInvite: vi.fn(), + updateOrgMemberRole: vi.fn(), + removeOrgMember: vi.fn(), + oauthStart: vi.fn(), + listBillingPlans: vi.fn(), + getBillingSubscription: vi.fn(), + getBillingUsageSummary: vi.fn(), + getBillingSeatUsage: vi.fn(), + updateBillingSeatLimit: vi.fn(), + initAssetUpload: vi.fn(), + completeAssetUpload: vi.fn(), + uploadAsset: vi.fn(), + getOrgSsoConfig: vi.fn(), + 
updateOrgSsoConfig: vi.fn(), + createScimToken: vi.fn(), + revokeScimToken: vi.fn(), + startOktaSso: vi.fn(), + mediaUrl: (uri: string) => (uri.startsWith("http") ? uri : `http://localhost:8000${uri}`), + jobBundleUrl: (jobId: string) => `http://localhost:8000/api/v1/jobs/${jobId}/bundle`, +})); + +vi.mock("./api/client", () => ({ apiClient: apiClientMock })); + +import App from "./App"; + +beforeEach(() => { + vi.clearAllMocks(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + + apiClientMock.listJobs.mockResolvedValue([]); + apiClientMock.listAssets.mockResolvedValue([]); + apiClientMock.getAsset.mockResolvedValue({ id: "asset-1", kind: "video", uri: "/media/tmp/video.mp4", mime_type: "video/mp4" }); + apiClientMock.getJob.mockResolvedValue({ id: "job-1", job_type: "captions", status: "queued", progress: 0, payload: {} }); + + apiClientMock.getSystemStatus.mockResolvedValue({ + api_version: "0.1.0", + offline_mode: false, + storage_backend: "LocalStorageBackend", + broker_url: "memory://", + result_backend: "cache+memory://", + worker: { ping_ok: true, workers: ["worker@local"], system_info: { ffmpeg: { present: true } } }, + }); + + apiClientMock.getUsageSummary.mockResolvedValue({ + total_jobs: 1, + queued_jobs: 0, + running_jobs: 0, + completed_jobs: 1, + failed_jobs: 0, + cancelled_jobs: 0, + job_type_counts: { captions: 1 }, + output_assets_count: 1, + output_duration_seconds: 12, + generated_bytes: 10, + }); + apiClientMock.getUsageCosts.mockResolvedValue({ + currency: "USD", + total_estimated_cost_cents: 0, + entries_count: 0, + by_metric: {}, + by_metric_cost_cents: {}, + }); + apiClientMock.getBudgetPolicy.mockResolvedValue({ + org_id: "org-1", + monthly_soft_limit_cents: null, + monthly_hard_limit_cents: null, + enforce_hard_limit: false, + current_month_estimated_cost_cents: 0, + projected_status: "ok", + }); + + apiClientMock.getMe.mockResolvedValue({ + user_id: "user-1", + email: "owner@test.dev", + 
display_name: "Owner", + org_id: "org-1", + org_name: "Org", + role: "owner", + }); + apiClientMock.getOrgContext.mockResolvedValue({ + org_id: "org-1", + org_name: "Org", + slug: "org", + role: "owner", + members: [], + }); + apiClientMock.listOrgInvites.mockResolvedValue([]); + + apiClientMock.listProjects.mockResolvedValue([{ id: "proj-1", name: "Proj", description: "d" }]); + apiClientMock.listProjectJobs.mockResolvedValue([]); + apiClientMock.listProjectAssets.mockResolvedValue([]); + apiClientMock.listProjectMembers.mockResolvedValue([]); + apiClientMock.listProjectComments.mockResolvedValue([]); + apiClientMock.listProjectActivity.mockResolvedValue([]); + + apiClientMock.listPublishProviders.mockResolvedValue([]); + apiClientMock.listPublishConnections.mockResolvedValue([]); + apiClientMock.listPublishJobs.mockResolvedValue([]); + + apiClientMock.listBillingPlans.mockResolvedValue([]); + apiClientMock.getBillingSubscription.mockResolvedValue({ plan_code: "free", status: "active", seat_limit: 1 }); + apiClientMock.getBillingUsageSummary.mockResolvedValue({ + period_start: "2026-03-01", + period_end: "2026-03-31", + quota_job_minutes: 100, + used_job_minutes: 1, + overage_job_minutes: 0, + used_storage_gb: 0, + quota_storage_gb: 5, + estimated_overage_cents: 0, + estimated_cost_cents: 0, + }); + apiClientMock.getBillingSeatUsage.mockResolvedValue({ + seat_limit: 1, + active_members: 1, + available_seats: 0, + pending_invites: 0, + }); +}); + +describe("App failure-path matrix", () => { + it("covers jobs delete/retry error branches and filters invalid dates", async () => { + const user = userEvent.setup(); + const confirmSpy = vi.spyOn(window, "confirm"); + + apiClientMock.listJobs + .mockResolvedValueOnce([]) + .mockResolvedValueOnce([ + { + id: "job-bad-date", + job_type: "captions", + status: "failed", + progress: 0, + created_at: "not-a-date", + input_asset_id: "asset-in", + output_asset_id: null, + payload: {}, + }, + { + id: "job-good", + job_type: 
"captions", + status: "completed", + progress: 1, + created_at: "2026-03-03T10:00:00Z", + input_asset_id: "asset-in", + output_asset_id: "asset-out", + payload: {}, + }, + ]); + + apiClientMock.getJob.mockResolvedValueOnce({ + id: "job-good", + job_type: "captions", + status: "failed", + progress: 0, + created_at: "2026-03-03T10:00:00Z", + input_asset_id: "asset-in", + output_asset_id: null, + payload: {}, + }); + + apiClientMock.deleteJob.mockRejectedValueOnce(new Error("delete failed")); + apiClientMock.retryJob.mockRejectedValueOnce(new Error("retry failed")); + + render(); + + await user.click(screen.getByRole("button", { name: "Jobs" })); + + const table = await screen.findByRole("table"); + expect(within(table).getByText("job-bad-date")).toBeInTheDocument(); + expect(within(table).getByText("job-good")).toBeInTheDocument(); + + const row = within(table).getByText("job-good").closest("tr") as HTMLElement; + await user.click(within(row).getByRole("button", { name: "View" })); + + confirmSpy.mockReturnValueOnce(false); + await user.click(screen.getByRole("button", { name: "Delete job" })); + expect(apiClientMock.deleteJob).not.toHaveBeenCalled(); + + confirmSpy.mockReturnValueOnce(true); + await user.click(screen.getByRole("button", { name: "Delete job" })); + expect(await screen.findByText("delete failed")).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Retry job" })); + expect(await screen.findByText("retry failed")).toBeInTheDocument(); + }, 20000); + + it("covers system, usage, and projects error branches", async () => { + const user = userEvent.setup(); + + apiClientMock.getSystemStatus.mockRejectedValueOnce(new Error("system failed")); + apiClientMock.getUsageSummary.mockRejectedValueOnce(new Error("usage failed")); + apiClientMock.listProjects.mockResolvedValueOnce([{ id: "proj-1", name: "Proj", description: null }]); + apiClientMock.listProjectJobs.mockRejectedValueOnce(new Error("project jobs failed")); + + render(); + + 
await user.click(screen.getByRole("button", { name: "System" })); + expect(await screen.findByText("system failed")).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Usage" })); + expect(await screen.findByText("usage failed")).toBeInTheDocument(); + + await user.clear(screen.getByLabelText("Soft limit (cents)")); + await user.type(screen.getByLabelText("Soft limit (cents)"), "-1"); + await user.click(screen.getByRole("button", { name: "Save budget policy" })); + expect(await screen.findByText(/Soft limit must be a non-negative number/i)).toBeInTheDocument(); + + await user.click(screen.getByRole("button", { name: "Projects" })); + expect(await screen.findByText("project jobs failed")).toBeInTheDocument(); + }, 20000); + + it("covers quick-start localStorage fallback", async () => { + const user = userEvent.setup(); + const getItemSpy = vi.spyOn(Storage.prototype, "getItem").mockImplementationOnce(() => { + throw new Error("storage blocked"); + }); + + render(); + + expect(await screen.findByText(/Quick start/i)).toBeInTheDocument(); + await user.click(screen.getByRole("button", { name: "Dismiss" })); + + getItemSpy.mockRestore(); + }, 20000); +}); \ No newline at end of file diff --git a/apps/web/src/App.richFlows.test.tsx b/apps/web/src/App.richFlows.test.tsx new file mode 100644 index 00000000..15787f00 --- /dev/null +++ b/apps/web/src/App.richFlows.test.tsx @@ -0,0 +1,371 @@ +import { fireEvent, render, screen, waitFor, within } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const apiClientMock = vi.hoisted(() => ({ + baseUrl: "http://localhost:8000/api/v1", + accessToken: "token" as string | null, + setAccessToken: vi.fn(), + listJobs: vi.fn(), + getJob: vi.fn(), + getAsset: vi.fn(), + listAssets: vi.fn(), + createCaptionJob: vi.fn(), + createTranslateJob: vi.fn(), + createStyledSubtitleJob: vi.fn(), + createShortsJob: vi.fn(), 
+ translateSubtitleAsset: vi.fn(), + mergeAv: vi.fn(), + createCutClipJob: vi.fn(), + getSystemStatus: vi.fn(), + getUsageSummary: vi.fn(), + getBudgetPolicy: vi.fn(), + updateBudgetPolicy: vi.fn(), + listProjects: vi.fn(), + createProject: vi.fn(), + listProjectJobs: vi.fn(), + listProjectAssets: vi.fn(), + createProjectShareLinks: vi.fn(), + listProjectMembers: vi.fn(), + addProjectMember: vi.fn(), + updateProjectMemberRole: vi.fn(), + removeProjectMember: vi.fn(), + listProjectComments: vi.fn(), + createProjectComment: vi.fn(), + deleteProjectComment: vi.fn(), + requestProjectApproval: vi.fn(), + approveProjectApproval: vi.fn(), + rejectProjectApproval: vi.fn(), + listProjectActivity: vi.fn(), + retryJob: vi.fn(), + register: vi.fn(), + login: vi.fn(), + logout: vi.fn(), + getMe: vi.fn(), + getOrgContext: vi.fn(), + createOrgInvite: vi.fn(), + listOrgInvites: vi.fn(), + revokeOrgInvite: vi.fn(), + updateOrgMemberRole: vi.fn(), + removeOrgMember: vi.fn(), + oauthStart: vi.fn(), + listBillingPlans: vi.fn(), + getBillingSubscription: vi.fn(), + getBillingUsageSummary: vi.fn(), + getBillingSeatUsage: vi.fn(), + updateBillingSeatLimit: vi.fn(), + initAssetUpload: vi.fn(), + completeAssetUpload: vi.fn(), + uploadAsset: vi.fn(), + getOrgSsoConfig: vi.fn(), + updateOrgSsoConfig: vi.fn(), + createScimToken: vi.fn(), + revokeScimToken: vi.fn(), + startOktaSso: vi.fn(), + listPublishProviders: vi.fn(), + listPublishConnections: vi.fn(), + listPublishJobs: vi.fn(), + startPublishConnection: vi.fn(), + completePublishConnection: vi.fn(), + revokePublishConnection: vi.fn(), + createPublishJob: vi.fn(), + retryPublishJob: vi.fn(), + jobBundleUrl: (jobId: string) => `http://localhost:8000/api/v1/jobs/${jobId}/bundle`, + mediaUrl: (uri: string) => (uri.startsWith("http") ? 
uri : `http://localhost:8000${uri}`), +})); + +vi.mock("./api/client", () => ({ apiClient: apiClientMock })); + +import App from "./App"; + +beforeEach(() => { + vi.clearAllMocks(); + localStorage.setItem("reframe_access_token", "token"); + apiClientMock.accessToken = "token"; + + if (!("createObjectURL" in URL)) { + Object.defineProperty(URL, "createObjectURL", { value: vi.fn(() => "blob:mock"), configurable: true }); + } + if (!("revokeObjectURL" in URL)) { + Object.defineProperty(URL, "revokeObjectURL", { value: vi.fn(), configurable: true }); + } + + vi.spyOn(URL, "createObjectURL").mockReturnValue("blob:mock"); + vi.spyOn(URL, "revokeObjectURL").mockImplementation(() => undefined); + vi.spyOn(HTMLAnchorElement.prototype, "click").mockImplementation(() => undefined); + + apiClientMock.listJobs.mockResolvedValue([]); + apiClientMock.listAssets.mockResolvedValue([ + { id: "asset-video-1", kind: "video", uri: "/media/tmp/input.mp4", mime_type: "video/mp4" }, + { id: "asset-sub-1", kind: "subtitle", uri: "/media/tmp/input.srt", mime_type: "text/plain" }, + ]); + apiClientMock.getSystemStatus.mockResolvedValue({ + api_version: "0.1.0", + offline_mode: false, + storage_backend: "LocalStorageBackend", + broker_url: "memory://", + result_backend: "cache+memory://", + worker: { ping_ok: true, workers: ["local-queue"], system_info: { ffmpeg: { present: true, version: "6.1" } } }, + }); + apiClientMock.getUsageSummary.mockResolvedValue({ + total_jobs: 0, + queued_jobs: 0, + running_jobs: 0, + completed_jobs: 0, + failed_jobs: 0, + cancelled_jobs: 0, + job_type_counts: {}, + output_assets_count: 0, + output_duration_seconds: 0, + generated_bytes: 0, + }); + apiClientMock.getBudgetPolicy.mockResolvedValue({ + org_id: "org-1", + monthly_soft_limit_cents: null, + monthly_hard_limit_cents: null, + enforce_hard_limit: false, + current_month_estimated_cost_cents: 0, + projected_status: "on_track", + }); + + apiClientMock.getMe.mockResolvedValue({ + user_id: "user-1", + email: 
"owner@team.test", + display_name: "Owner", + org_id: "org-1", + org_name: "Team Org", + role: "owner", + }); + apiClientMock.getOrgContext.mockResolvedValue({ + org_id: "org-1", + org_name: "Team Org", + slug: "team-org", + role: "owner", + members: [{ user_id: "user-1", email: "owner@team.test", display_name: "Owner", role: "owner" }], + }); + apiClientMock.listOrgInvites.mockResolvedValue([]); + + apiClientMock.listProjects.mockResolvedValue([{ id: "proj-1", name: "Launch", description: "release" }]); + apiClientMock.listProjectJobs.mockResolvedValue([]); + apiClientMock.listProjectAssets.mockResolvedValue([ + { id: "asset-video-1", kind: "video", uri: "/media/tmp/input.mp4", mime_type: "video/mp4" }, + { id: "asset-sub-1", kind: "subtitle", uri: "/media/tmp/input.srt", mime_type: "text/plain" }, + ]); + apiClientMock.listProjectMembers.mockResolvedValue([]); + apiClientMock.listProjectComments.mockResolvedValue([]); + apiClientMock.listProjectActivity.mockResolvedValue([]); + + apiClientMock.listPublishProviders.mockResolvedValue([{ provider: "youtube", display_name: "YouTube", connected_count: 0 }]); + apiClientMock.listPublishConnections.mockResolvedValue([]); + apiClientMock.listPublishJobs.mockResolvedValue([]); + + apiClientMock.createShortsJob.mockResolvedValue({ id: "job-shorts", job_type: "shorts", status: "queued", progress: 0, payload: {} }); + apiClientMock.createCaptionJob.mockResolvedValue({ id: "job-caption", job_type: "captions", status: "queued", progress: 0, payload: {} }); + apiClientMock.uploadAsset.mockResolvedValue({ id: "asset-video-upload", kind: "video", uri: "/media/tmp/upload.mp4", mime_type: "video/mp4" }); + apiClientMock.createCutClipJob.mockResolvedValue({ id: "job-cut-1", job_type: "cut_clip", status: "queued", progress: 0, payload: {} }); + + apiClientMock.createStyledSubtitleJob + .mockResolvedValueOnce({ id: "job-style-preview", job_type: "style", status: "queued", progress: 0, payload: { preview_seconds: 5 } }) + 
.mockResolvedValueOnce({ id: "job-style-full", job_type: "style", status: "queued", progress: 0, payload: {} }) + .mockResolvedValue({ id: "job-style-subtitles", job_type: "style", status: "queued", progress: 0, payload: {} }); + + apiClientMock.getJob.mockImplementation(async (jobId: string) => { + if (jobId === "job-shorts") { + return { + id: "job-shorts", + job_type: "shorts", + status: "completed", + progress: 1, + output_asset_id: "asset-manifest", + payload: { + clip_assets: [ + { + id: "clip-1", + asset_id: "asset-clip-1", + subtitle_asset_id: "asset-sub-1", + thumbnail_asset_id: "asset-thumb-1", + thumbnail_uri: "/media/tmp/clip-thumb.jpg", + uri: "/media/tmp/clip.mp4", + subtitle_uri: "/media/tmp/clip.srt", + styled_uri: "/media/tmp/clip-styled.mp4", + style_preset: "TikTok Bold", + start: 1, + end: 9, + duration: 8, + score: 0.91, + }, + ], + }, + }; + } + if (jobId === "job-cut-1") { + return { + id: "job-cut-1", + job_type: "cut_clip", + status: "completed", + progress: 1, + output_asset_id: "asset-cut-1", + payload: { thumbnail_asset_id: "asset-thumb-2", thumbnail_uri: "/media/tmp/clip-thumb-2.jpg", duration: 6.5 }, + }; + } + if (jobId.startsWith("job-style")) { + return { + id: jobId, + job_type: "style", + status: "completed", + progress: 1, + output_asset_id: `${jobId}-asset`, + payload: {}, + }; + } + if (jobId === "job-caption") { + return { + id: "job-caption", + job_type: "captions", + status: "completed", + progress: 1, + output_asset_id: "asset-caption-1", + payload: {}, + }; + } + return { id: jobId, job_type: "unknown", status: "queued", progress: 0, payload: {} }; + }); + + apiClientMock.getAsset.mockImplementation(async (assetId: string) => { + if (assetId === "asset-manifest") { + return { id: assetId, kind: "manifest", uri: "/media/tmp/shorts-manifest.json", mime_type: "application/json" }; + } + if (assetId === "asset-cut-1") { + return { id: assetId, kind: "video", uri: "/media/tmp/clip-recutted.mp4", mime_type: "video/mp4" }; + } + 
if (assetId === "asset-caption-1") { + return { id: assetId, kind: "subtitle", uri: "/media/tmp/captions.srt", mime_type: "text/plain" }; + } + if (assetId.startsWith("job-style")) { + return { id: assetId, kind: "video", uri: "/media/tmp/styled-output.mp4", mime_type: "video/mp4" }; + } + return { id: assetId, kind: "video", uri: "/media/tmp/default.mp4", mime_type: "video/mp4" }; + }); +}); + +describe("App rich flow coverage", () => { + it("covers shorts result actions and subtitle panel interactions", async () => { + const user = userEvent.setup(); + render(); + + await user.click(screen.getByRole("button", { name: "Shorts" })); + + const uploadInput = document.querySelector("input[type=file]") as HTMLInputElement; + const videoFile = new File(["video"], "clip.mp4", { type: "video/mp4" }); + fireEvent.change(uploadInput, { target: { files: [videoFile] } }); + await waitFor(() => expect(apiClientMock.uploadAsset).toHaveBeenCalled()); + + await user.type(await screen.findByLabelText("Video asset ID or URL"), "asset-video-1"); + await user.click(screen.getByRole("button", { name: "Create shorts job" })); + expect(apiClientMock.createShortsJob).toHaveBeenCalled(); + + expect(await screen.findByRole("button", { name: "Download CSV" }, { timeout: 15000 })).toBeInTheDocument(); + await user.click(screen.getByRole("button", { name: "Download CSV" })); + await user.click(screen.getByRole("button", { name: "Download EDL" })); + + await user.click(screen.getByRole("button", { name: "Apply to all" })); + await user.click(screen.getByRole("button", { name: "Preview 5s" })); + await user.click(screen.getByRole("button", { name: "Render styled" })); + + await user.click(screen.getByRole("button", { name: "Edit" })); + const startInput = screen.getByLabelText("Start (s)"); + const endInput = screen.getByLabelText("End (s)"); + await user.clear(startInput); + await user.type(startInput, "2"); + await user.clear(endInput); + await user.type(endInput, "8"); + await 
user.click(screen.getByRole("button", { name: "Re-cut clip" })); + await waitFor(() => { + expect(apiClientMock.createCutClipJob).toHaveBeenCalled(); + }); + + expect(apiClientMock.createStyledSubtitleJob).toHaveBeenCalled(); + + await user.click(screen.getByRole("button", { name: "Subtitles" })); + expect(await screen.findByText(/Select assets/i)).toBeInTheDocument(); + + const subtitleSelectSection = screen.getByText("Or pick a recent subtitle asset").closest("label") as HTMLElement; + await user.selectOptions(within(subtitleSelectSection).getByRole("combobox"), "asset-sub-1"); + + const videoSelectSection = screen.getByText("Or pick a recent video asset").closest("label") as HTMLElement; + await user.selectOptions(within(videoSelectSection).getByRole("combobox"), "asset-video-1"); + + await user.click(screen.getByRole("button", { name: "Generate captions from video" })); + expect(apiClientMock.createCaptionJob).toHaveBeenCalled(); + + await user.click(screen.getByRole("button", { name: "Preview 5s" })); + await user.click(screen.getByRole("button", { name: "Render full video" })); + expect(apiClientMock.createStyledSubtitleJob).toHaveBeenCalledTimes(4); + }, 45000); + + it("sweeps visible controls across tabs to exercise broad form branches", async () => { + const user = userEvent.setup(); + render(); + + const tabNames = [ + "Captions", + "Subtitles", + "Styling", + "Shorts", + "Utilities", + "Jobs", + "Usage", + "Projects", + "Workflows", + "Account", + ]; + + const sweepCurrentView = () => { + const root = document.body; + + root.querySelectorAll("textarea").forEach((node) => { + const el = node as HTMLTextAreaElement; + fireEvent.change(el, { target: { value: "coverage wave text" } }); + }); + + root.querySelectorAll("select").forEach((node) => { + const el = node as HTMLSelectElement; + const options = Array.from(el.options).filter((opt) => opt.value); + const value = options.length > 1 ? 
options[1]?.value : options[0]?.value; + if (value != null) { + fireEvent.change(el, { target: { value } }); + } + }); + + root.querySelectorAll("input").forEach((node) => { + const el = node as HTMLInputElement; + if (el.type === "file") return; + if (el.type === "checkbox") { + fireEvent.click(el); + return; + } + if (el.type === "date") { + fireEvent.change(el, { target: { value: "2026-03-01" } }); + return; + } + if (el.type === "number" || el.type === "range") { + fireEvent.change(el, { target: { value: "2" } }); + return; + } + fireEvent.change(el, { target: { value: "coverage-wave" } }); + }); + }; + + for (const tab of tabNames) { + const btn = screen.queryByRole("button", { name: tab }); + if (!btn) continue; + await user.click(btn); + sweepCurrentView(); + } + + await waitFor(() => { + expect(screen.getByText(/Creative media pipeline/)).toBeInTheDocument(); + }); + }, 45000); + +}); diff --git a/apps/web/src/App.tsx b/apps/web/src/App.tsx index 688133a5..8ae1dacd 100644 --- a/apps/web/src/App.tsx +++ b/apps/web/src/App.tsx @@ -114,7 +114,7 @@ const ORG_MANAGER_ROLES = ["owner", "admin"]; const PUBLISH_PROVIDERS = ["youtube", "tiktok", "instagram", "facebook"] as const; type PublishProvider = (typeof PUBLISH_PROVIDERS)[number]; -async function copyToClipboard(text: string): Promise { +export async function copyToClipboard(text: string): Promise { try { await navigator.clipboard.writeText(text); return true; @@ -137,7 +137,10 @@ async function copyToClipboard(text: string): Promise { } } -function CopyCommandButton({ command, label = "Copy curl" }: { command: string; label?: string }) { +export function CopyCommandButton({ + command, + label = "Copy curl", +}: Readonly<{ command: string; label?: string }>) { const [status, setStatus] = useState(null); const onCopy = async () => { @@ -153,15 +156,29 @@ function CopyCommandButton({ command, label = "Copy curl" }: { command: string; ); } -function TextPreview({ +function 
normalizeProjectShareLinksResponse(response: unknown): ProjectShareLink[] { + if (Array.isArray(response)) { + return response.filter((item): item is ProjectShareLink => Boolean(item && typeof item === "object")); + } + + if (response && typeof response === "object" && "links" in response) { + const maybeLinks = (response as { links?: unknown }).links; + if (Array.isArray(maybeLinks)) { + return maybeLinks.filter((item): item is ProjectShareLink => Boolean(item && typeof item === "object")); + } + } + + return []; +} +export function TextPreview({ url, title, maxChars = 12000, -}: { +}: Readonly<{ url: string; title: string; maxChars?: number; -}) { +}>) { const [content, setContent] = useState(""); const [loading, setLoading] = useState(false); const [error, setError] = useState(null); @@ -243,7 +260,7 @@ function useLiveJobs() { return { jobs, loading, error, refresh }; } -function JobStatusPill({ status }: { status: JobStatus }) { +export function JobStatusPill({ status }: Readonly<{ status: JobStatus }>) { const toneMap: Record = { queued: "neutral", running: "info", @@ -254,15 +271,15 @@ function JobStatusPill({ status }: { status: JobStatus }) { return {status}; } -function CaptionsForm({ +export function CaptionsForm({ onCreated, initialVideoId, projectId, -}: { +}: Readonly<{ onCreated: (job: Job) => void; initialVideoId?: string; projectId?: string; -}) { +}>) { const [videoId, setVideoId] = useState(initialVideoId || ""); const [sourceLang, setSourceLang] = useState("auto"); const [backend, setBackend] = useState("faster_whisper"); @@ -427,7 +444,7 @@ function CaptionsForm({ ); } -function TranslateForm({ onCreated, projectId }: { onCreated: (job: Job) => void; projectId?: string }) { +export function TranslateForm({ onCreated, projectId }: Readonly<{ onCreated: (job: Job) => void; projectId?: string }>) { const [subtitleId, setSubtitleId] = useState(""); const [targetLang, setTargetLang] = useState("es"); const [notes, setNotes] = useState(""); @@ 
-488,15 +505,15 @@ function TranslateForm({ onCreated, projectId }: { onCreated: (job: Job) => void ); } -function UploadPanel({ +export function UploadPanel({ onAssetId, onPreview, projectId, -}: { +}: Readonly<{ onAssetId: (id: string) => void; onPreview: (url: string | null) => void; projectId?: string; -}) { +}>) { const [uploading, setUploading] = useState(false); const [error, setError] = useState(null); const inputRef = useRef(null); @@ -545,15 +562,15 @@ function UploadPanel({ ); } -function AudioUploadPanel({ +export function AudioUploadPanel({ onAssetId, onPreview, projectId, -}: { +}: Readonly<{ onAssetId: (id: string) => void; onPreview: (url: string | null) => void; projectId?: string; -}) { +}>) { const [uploading, setUploading] = useState(false); const [error, setError] = useState(null); const inputRef = useRef(null); @@ -595,17 +612,17 @@ function AudioUploadPanel({ ); } -function SubtitleUpload({ +export function SubtitleUpload({ onAssetId, onPreview, label = "Upload subtitles (SRT/VTT)", projectId, -}: { +}: Readonly<{ onAssetId: (id: string) => void; onPreview: (url: string | null, name?: string | null) => void; label?: string; projectId?: string; -}) { +}>) { const [uploading, setUploading] = useState(false); const [error, setError] = useState(null); @@ -641,15 +658,15 @@ function SubtitleUpload({ ); } -function SubtitleEditorCard({ +export function SubtitleEditorCard({ initialAssetId, onAssetChosen, projectId, -}: { +}: Readonly<{ initialAssetId?: string; onAssetChosen: (asset: MediaAsset) => void; projectId?: string; -}) { +}>) { const [assetId, setAssetId] = useState(initialAssetId || ""); const [contents, setContents] = useState(""); const [original, setOriginal] = useState(null); @@ -965,7 +982,7 @@ function SubtitleEditorCard({ ); } -function SubtitleToolsForm({ onCreated, projectId }: { onCreated: (job: Job, bilingual: boolean) => void; projectId?: string }) { +export function SubtitleToolsForm({ onCreated, projectId }: Readonly<{ 
onCreated: (job: Job, bilingual: boolean) => void; projectId?: string }>) { const [subtitleId, setSubtitleId] = useState(""); const [targetLang, setTargetLang] = useState("es"); const [bilingual, setBilingual] = useState(false); @@ -1037,17 +1054,17 @@ function SubtitleToolsForm({ onCreated, projectId }: { onCreated: (job: Job, bil ); } -function MergeAvForm({ +export function MergeAvForm({ onCreated, initialVideoId, initialAudioId, projectId, -}: { +}: Readonly<{ onCreated: (job: Job) => void; initialVideoId?: string; initialAudioId?: string; projectId?: string; -}) { +}>) { const [videoId, setVideoId] = useState(initialVideoId || ""); const [audioId, setAudioId] = useState(initialAudioId || ""); const [offset, setOffset] = useState(0); @@ -1140,7 +1157,7 @@ function MergeAvForm({ ); } -function ShortsForm({ onCreated, projectId }: { onCreated: (job: Job) => void; projectId?: string }) { +export function ShortsForm({ onCreated, projectId }: Readonly<{ onCreated: (job: Job) => void; projectId?: string }>) { const [videoId, setVideoId] = useState(""); const [numClips, setNumClips] = useState(3); const [minDuration, setMinDuration] = useState(10); @@ -1358,19 +1375,19 @@ function ShortsForm({ onCreated, projectId }: { onCreated: (job: Job) => void; p ); } -function StyleEditor({ +export function StyleEditor({ onPreview, onRender, onJobCreated, videoId, subtitleId, -}: { +}: Readonly<{ onPreview: (payload: any) => Promise | void; onRender: (payload: any) => Promise | void; onJobCreated?: (job: Job) => void; videoId: string; subtitleId: string; -}) { +}>) { const [font, setFont] = useState(FONTS[0]); const [fontSize, setFontSize] = useState(42); const [textColor, setTextColor] = useState("#ffffff"); @@ -1493,8 +1510,8 @@ function StyleEditor({ ); } - - function AppShell() { +// NOSONAR: AppShell currently orchestrates all product tabs and is decomposed in follow-up coverage/refactor slices. 
+export function AppShell() { // NOSONAR: UI orchestration shell intentionally coordinates all tabs in one component. const [active, setActive] = useState(NAV_ITEMS[0].id); const [theme, setTheme] = useState<"light" | "dark">("dark"); const [showSettings, setShowSettings] = useState(false); @@ -2446,7 +2463,7 @@ function StyleEditor({ asset_ids: assetIds, expires_in_hours: 24, }); - setShareLinks(response.links); + setShareLinks(normalizeProjectShareLinksResponse(response)); } catch (err) { setProjectDataError(err instanceof Error ? err.message : "Failed to generate share link"); } finally { @@ -5505,3 +5522,6 @@ function App() { } export default App; + + + diff --git a/apps/web/src/api/client.test.ts b/apps/web/src/api/client.test.ts new file mode 100644 index 00000000..e2f6c4bc --- /dev/null +++ b/apps/web/src/api/client.test.ts @@ -0,0 +1,361 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { ApiClient } from "./client"; + +function okJson(body: unknown, status = 200) { + return { + ok: true, + status, + statusText: "OK", + json: async () => body, + text: async () => JSON.stringify(body), + } as unknown as Response; +} + +function failJson(message: string, status = 400, statusText = "Bad Request") { + return { + ok: false, + status, + statusText, + json: async () => ({ message }), + text: async () => message, + } as unknown as Response; +} + +describe("ApiClient", () => { + const fetcher = vi.fn(); + let client: ApiClient; + + beforeEach(() => { + fetcher.mockReset(); + client = new ApiClient({ baseUrl: "http://localhost:8000/api/v1", fetcher: fetcher as unknown as typeof fetch }); + }); + + it("handles request headers, auth, errors, and 204 responses", async () => { + fetcher.mockResolvedValueOnce(okJson({ ok: true })); + await client.request("/jobs"); + + const [url1, init1] = fetcher.mock.calls[0] as [string, RequestInit]; + expect(url1).toBe("http://localhost:8000/api/v1/jobs"); + expect(new 
Headers(init1.headers).get("Content-Type")).toBe("application/json"); + + client.setAccessToken("abc"); + fetcher.mockResolvedValueOnce(okJson({ ok: true })); + await client.request("/auth/me"); + const [, init2] = fetcher.mock.calls[1] as [string, RequestInit]; + expect(new Headers(init2.headers).get("Authorization")).toBe("Bearer abc"); + + fetcher.mockResolvedValueOnce({ ok: true, status: 204, statusText: "No Content", json: async () => ({}) } as unknown as Response); + await expect(client.request("/auth/logout", { method: "POST" })).resolves.toBeUndefined(); + + fetcher.mockResolvedValueOnce(failJson("custom fail")); + await expect(client.request("/bad")).rejects.toThrow("custom fail"); + + fetcher.mockResolvedValueOnce({ + ok: false, + status: 500, + statusText: "Server Error", + json: async () => { + throw new Error("bad json"); + }, + text: async () => "", + } as unknown as Response); + await expect(client.request("/bad2")).rejects.toThrow("Server Error"); + }); + + it("executes high-surface request wrappers", async () => { + const requestSpy = vi.spyOn(client, "request").mockResolvedValue({} as never); + + await client.listJobs({ status: "running", project_id: "p1" }); + await client.listJobs(); + await client.getJob("job-1"); + await client.getAsset("asset-1"); + await client.listAssets({ kind: "video", limit: 10, project_id: "p1" }); + await client.listAssets(); + + await client.createCaptionJob({ video_asset_id: "v", idempotency_key: "k" }); + await client.createCaptionJob({ video_asset_id: "v" }); + await client.createTranslateJob({ subtitle_asset_id: "s", target_language: "fr", idempotency_key: "k" }); + await client.createTranslateJob({ subtitle_asset_id: "s", target_language: "fr" }); + await client.createStyledSubtitleJob({ video_asset_id: "v", subtitle_asset_id: "s", style: {}, idempotency_key: "k" }); + await client.createStyledSubtitleJob({ video_asset_id: "v", subtitle_asset_id: "s", style: {} }); + await client.createShortsJob({ video_asset_id: 
"v", idempotency_key: "k" }); + await client.createShortsJob({ video_asset_id: "v" }); + await client.translateSubtitleAsset({ subtitle_asset_id: "s", target_language: "es", idempotency_key: "k" }); + await client.translateSubtitleAsset({ subtitle_asset_id: "s", target_language: "es" }); + await client.mergeAv({ video_asset_id: "v", audio_asset_id: "a", idempotency_key: "k" }); + await client.mergeAv({ video_asset_id: "v", audio_asset_id: "a" }); + await client.createCutClipJob({ video_asset_id: "v", start: 0, end: 1, idempotency_key: "k" }); + await client.createCutClipJob({ video_asset_id: "v", start: 0, end: 1 }); + await client.retryJob("job-1", { idempotency_key: "k" }); + await client.retryJob("job-1"); + + await client.getSystemStatus(); + await client.getUsageSummary({ from: "2026-01-01", to: "2026-02-01", project_id: "p1" }); + await client.getUsageSummary(); + await client.getUsageCosts({ from: "2026-01-01", to: "2026-02-01", project_id: "p1" }); + await client.getUsageCosts(); + await client.getBudgetPolicy(); + await client.updateBudgetPolicy({ enforce_hard_limit: true }); + + await client.listProjects(); + await client.createProject({ name: "n" }); + await client.getProject("p1"); + await client.listProjectJobs("p1"); + await client.listProjectAssets("p1", { kind: "video", limit: 10 }); + await client.listProjectAssets("p1"); + await client.listProjectMembers("p1"); + await client.addProjectMember("p1", { email: "a@b.com", role: "editor" }); + await client.updateProjectMemberRole("p1", "u1", { role: "viewer" }); + await client.listProjectComments("p1"); + await client.createProjectComment("p1", { body: "hi" }); + await client.requestProjectApproval("p1", { summary: "ok" }); + await client.requestProjectApproval("p1"); + await client.approveProjectApproval("p1", "a1"); + await client.rejectProjectApproval("p1", "a1"); + await client.listProjectActivity("p1", 25); + await client.createProjectShareLinks("p1", { asset_ids: ["a1"] }); + + await 
client.initAssetUpload({ filename: "f", mime_type: "video/mp4" }); + await client.completeAssetUpload({ upload_id: "u1", asset_id: "a1" }); + await client.initMultipartAssetUpload({ filename: "f" }); + await client.signMultipartUploadPart("u1", 1); + await client.completeMultipartUpload("u1", { parts: [{ part_number: 1, etag: "x" }] }); + await client.abortMultipartUpload("u1"); + + await client.register({ email: "a@b.com", password: "pw" }); + await client.login({ email: "a@b.com", password: "pw" }); + await client.refreshToken("rt"); + await client.logout(); + await client.getMe(); + await client.oauthStart("google", "http://localhost/cb"); + await client.oauthStart("github"); + + await client.getOrgContext(); + await client.listOrgs(); + await client.createOrg({ name: "org" }); + await client.getOrgSsoConfig("org-1"); + await client.updateOrgSsoConfig("org-1", { enabled: true }); + await client.createScimToken("org-1", { scopes: ["Users"] }); + await client.createScimToken("org-1"); + await client.startOktaSso("http://localhost/cb"); + await client.startOktaSso(); + await client.completeOktaSso({ state: "s", code: "c", email: "a@b.com", sub: "sub", groups: "admins" }); + await client.completeOktaSso({ state: "s" }); + + await client.listOrgInvites(); + await client.createOrgInvite({ email: "x@y.com", role: "editor", expires_in_days: 7 }); + await client.revokeOrgInvite("inv-1"); + await client.resolveOrgInvite("tok"); + await client.acceptOrgInvite({ token: "tok" }); + await client.updateOrgMemberRole("u1", { role: "owner" }); + await client.addOrgMember("org-1", { email: "m@x.com" }); + + await client.listAuditEvents(); + await client.listApiKeys("org-1"); + await client.createApiKey("org-1", { name: "k" }); + + await client.createWorkflowTemplate({ name: "wf", steps: [] }); + await client.listWorkflowTemplates(true); + await client.listWorkflowTemplates(false); + await client.createWorkflowRun({ template_id: "t1", video_asset_id: "a1" }); + await 
client.getWorkflowRun("r1"); + await client.cancelWorkflowRun("r1"); + + await client.listPublishProviders(); + await client.listPublishConnections("youtube"); + await client.startPublishConnection("youtube", "http://localhost/cb"); + await client.startPublishConnection("tiktok"); + await client.completePublishConnection("youtube", { + state: "s", + code: "code", + refresh_token: "r", + account_id: "acct", + account_label: "label", + }); + await client.completePublishConnection("facebook", { state: "s" }); + await client.createPublishJob({ provider: "youtube", connection_id: "c", asset_id: "a" }); + await client.listPublishJobs({ provider: "youtube", status: "queued" }); + await client.listPublishJobs(); + await client.getPublishJob("p1"); + await client.retryPublishJob("p1"); + + await client.listBillingPlans(); + await client.getBillingSubscription(); + await client.getBillingUsageSummary(); + await client.getBillingSeatUsage(); + await client.getBillingCostModel(); + await client.createBillingCheckoutSession({ plan_code: "starter" }); + await client.updateBillingSeatLimit({ seat_limit: 8 }); + await client.createBillingPortalSession({ return_url: "http://localhost" }); + await client.createBillingPortalSession(); + + expect(requestSpy).toHaveBeenCalled(); + }); + + it("covers direct delete/revoke method success and failures", async () => { + client.setAccessToken("abc"); + + fetcher.mockResolvedValue(okJson({}, 200)); + await client.removeProjectMember("p1", "u1"); + await client.deleteProjectComment("p1", "c1"); + await client.revokeScimToken("org-1", "tok-1"); + await client.removeOrgMemberFromOrg("org-1", "u1"); + await client.removeOrgMember("u1"); + await client.revokeApiKey("org-1", "k1"); + await client.revokePublishConnection("youtube", "pc1"); + await client.deleteJob("job-1", { deleteAssets: true }); + await client.deleteJob("job-2"); + await client.deleteAsset("asset-1"); + + fetcher.mockResolvedValueOnce({ ok: false, status: 500, statusText: "", 
text: async () => "", json: async () => ({}) } as unknown as Response); + await expect(client.removeProjectMember("p1", "u1")).rejects.toThrow("Failed to remove project member"); + + fetcher.mockResolvedValueOnce({ ok: false, status: 500, statusText: "oops", text: async () => { throw new Error("x"); }, json: async () => ({}) } as unknown as Response); + await expect(client.deleteProjectComment("p1", "c1")).rejects.toThrow("oops"); + }); + + it("covers uploadAsset POST, PUT, and unsupported method paths", async () => { + client.setAccessToken("token"); + const file = new File(["video"], "clip.mp4", { type: "video/mp4" }); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u1", + asset_id: null, + upload_url: "http://localhost:8000/api/v1/assets/upload", + method: "POST", + headers: {}, + form_fields: { acl: "private" }, + expires_at: "2026-03-04T00:00:00Z", + strategy: "presigned_post", + }); + vi.spyOn(client, "completeAssetUpload").mockResolvedValue({ upload_id: "u1", asset_id: "a1" }); + + fetcher.mockResolvedValueOnce(okJson({ id: "a1", kind: "video" })); + const postAsset = await client.uploadAsset(file, "video"); + expect(postAsset.id).toBe("a1"); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u2", + asset_id: "a2", + upload_url: "https://storage.example/upload", + method: "PUT", + headers: {}, + form_fields: {}, + expires_at: "2026-03-04T00:00:00Z", + strategy: "presigned_put", + }); + vi.spyOn(client, "getAsset").mockResolvedValue({ id: "a2", kind: "video" } as never); + fetcher.mockResolvedValueOnce(okJson({}, 200)); + const putAsset = await client.uploadAsset(file, "video"); + expect(putAsset.id).toBe("a2"); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u3", + asset_id: "a3", + upload_url: "https://storage.example/upload", + method: "PATCH", + headers: {}, + form_fields: {}, + expires_at: "2026-03-04T00:00:00Z", + strategy: "unsupported", + }); + await 
expect(client.uploadAsset(file, "video")).rejects.toThrow("Unsupported upload method: PATCH"); + + expect(client.mediaUrl("https://cdn.example/file.mp4")).toBe("https://cdn.example/file.mp4"); + expect(client.mediaUrl("/media/out.mp4")).toContain("/media/out.mp4"); + + const malformed = new ApiClient({ baseUrl: "bad-url", fetcher: fetcher as unknown as typeof fetch }); + expect(malformed.mediaUrl("/asset")).toContain("/asset"); + expect(client.jobBundleUrl("job-1")).toBe("http://localhost:8000/api/v1/jobs/job-1/bundle"); + }); + + it("covers revoke/delete/upload failure branches across enterprise helpers", async () => { + client.setAccessToken("token"); + const file = new File(["video"], "clip.mp4", { type: "video/mp4" }); + + const failWithText = (message: string, status = 500, statusText = "Server Error") => + ({ + ok: false, + status, + statusText, + text: async () => message, + json: async () => ({ message }), + }) as unknown as Response; + + const failWithThrownText = (status = 500, statusText = "fallback-status") => + ({ + ok: false, + status, + statusText, + text: async () => { + throw new Error("text failed"); + }, + json: async () => ({}), + }) as unknown as Response; + + fetcher.mockResolvedValueOnce(failWithText("scim revoke failed")); + await expect(client.revokeScimToken("org-1", "tok-1")).rejects.toThrow("scim revoke failed"); + + fetcher.mockResolvedValueOnce(failWithThrownText(500, "remove-org-member-status")); + await expect(client.removeOrgMemberFromOrg("org-1", "user-1")).rejects.toThrow( + "remove-org-member-status", + ); + + fetcher.mockResolvedValueOnce(failWithText("remove member failed")); + await expect(client.removeOrgMember("user-1")).rejects.toThrow("remove member failed"); + + fetcher.mockResolvedValueOnce(failWithThrownText(500, "revoke-api-key-status")); + await expect(client.revokeApiKey("org-1", "key-1")).rejects.toThrow("revoke-api-key-status"); + + fetcher.mockResolvedValueOnce(failWithText("revoke publish failed")); + await 
expect(client.revokePublishConnection("youtube", "conn-1")).rejects.toThrow( + "revoke publish failed", + ); + + fetcher.mockResolvedValueOnce(failWithThrownText(500, "delete-job-status")); + await expect(client.deleteJob("job-1")).rejects.toThrow("delete-job-status"); + + fetcher.mockResolvedValueOnce(failWithText("delete-asset-failed")); + await expect(client.deleteAsset("asset-1")).rejects.toThrow("delete-asset-failed"); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u-post-fail", + asset_id: null, + upload_url: "http://localhost:8000/api/v1/assets/upload", + method: "POST", + headers: {}, + form_fields: {}, + expires_at: "2026-03-04T00:00:00Z", + strategy: "presigned_post", + }); + fetcher.mockResolvedValueOnce(failWithText("post upload failed")); + await expect(client.uploadAsset(file, "video")).rejects.toThrow("post upload failed"); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u-put-fail", + asset_id: "asset-put", + upload_url: "https://storage.example/upload", + method: "PUT", + headers: {}, + form_fields: {}, + expires_at: "2026-03-04T00:00:00Z", + strategy: "presigned_put", + }); + fetcher.mockResolvedValueOnce(failWithThrownText(500, "put-upload-status")); + await expect(client.uploadAsset(file, "video")).rejects.toThrow("put-upload-status"); + + vi.spyOn(client, "initAssetUpload").mockResolvedValueOnce({ + upload_id: "u-put-missing-asset", + asset_id: null, + upload_url: "https://storage.example/upload", + method: "PUT", + headers: {}, + form_fields: {}, + expires_at: "2026-03-04T00:00:00Z", + strategy: "presigned_put", + }); + fetcher.mockResolvedValueOnce(okJson({}, 200)); + await expect(client.uploadAsset(file, "video")).rejects.toThrow("Upload session missing asset_id"); + }); +}); + diff --git a/apps/web/src/components/ErrorBoundary.test.tsx b/apps/web/src/components/ErrorBoundary.test.tsx new file mode 100644 index 00000000..d71c2719 --- /dev/null +++ 
b/apps/web/src/components/ErrorBoundary.test.tsx @@ -0,0 +1,34 @@ +import { render, screen } from "@testing-library/react"; +import { describe, expect, it, vi } from "vitest"; +import { ErrorBoundary } from "./ErrorBoundary"; + +function Thrower(): never { + throw new Error("boom"); +} + +describe("ErrorBoundary", () => { + it("renders children when no error occurs", () => { + render( + +
healthy child
+
, + ); + + expect(screen.getByText("healthy child")).toBeInTheDocument(); + }); + + it("renders fallback when child throws", () => { + const spy = vi.spyOn(console, "error").mockImplementation(() => {}); + + render( + + + , + ); + + expect(screen.getByText("Something went wrong")).toBeInTheDocument(); + expect(screen.getByText("Refresh the page or try again later.")).toBeInTheDocument(); + + spy.mockRestore(); + }); +}); diff --git a/apps/web/src/components/SettingsModal.test.tsx b/apps/web/src/components/SettingsModal.test.tsx new file mode 100644 index 00000000..eceb637f --- /dev/null +++ b/apps/web/src/components/SettingsModal.test.tsx @@ -0,0 +1,38 @@ +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { describe, expect, it, vi } from "vitest"; +import { SettingsModal } from "./SettingsModal"; + +describe("SettingsModal", () => { + it("updates fields and closes via action buttons", async () => { + const user = userEvent.setup(); + const onClose = vi.fn(); + + render(); + + const modelInput = screen.getByLabelText("Preferred model") as HTMLInputElement; + const languageInput = screen.getByLabelText("Language") as HTMLInputElement; + const outputPathInput = screen.getByLabelText("Default output path") as HTMLInputElement; + const notesInput = screen.getByLabelText("Notes") as HTMLTextAreaElement; + + await user.clear(modelInput); + await user.type(modelInput, "whisper-small"); + await user.clear(languageInput); + await user.type(languageInput, "en"); + await user.clear(outputPathInput); + await user.type(outputPathInput, "/tmp/out"); + await user.type(notesInput, "keep this profile"); + + expect(modelInput.value).toBe("whisper-small"); + expect(languageInput.value).toBe("en"); + expect(outputPathInput.value).toBe("/tmp/out"); + expect(notesInput.value).toContain("keep this profile"); + + await user.click(screen.getByRole("button", { name: "Close settings" })); + await 
user.click(screen.getByRole("button", { name: "Cancel" })); + await user.click(screen.getByRole("button", { name: "Save" })); + + expect(onClose).toHaveBeenCalledTimes(3); + }); +}); + diff --git a/apps/web/src/main.test.tsx b/apps/web/src/main.test.tsx new file mode 100644 index 00000000..f8c12c36 --- /dev/null +++ b/apps/web/src/main.test.tsx @@ -0,0 +1,31 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const renderMock = vi.fn(); +const createRootMock = vi.fn(() => ({ render: renderMock })); + +vi.mock("react-dom/client", () => ({ + default: { + createRoot: createRootMock, + }, + createRoot: createRootMock, +})); + +vi.mock("./App", () => ({ + default: () => null, +})); + +describe("main entrypoint", () => { + beforeEach(() => { + vi.resetModules(); + createRootMock.mockClear(); + renderMock.mockClear(); + document.body.innerHTML = '
'; + }); + + it("creates root and renders App", async () => { + await import("./main"); + + expect(createRootMock).toHaveBeenCalledTimes(1); + expect(renderMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/apps/web/vite.config.ts b/apps/web/vite.config.ts index 599fd92a..b94fdeda 100644 --- a/apps/web/vite.config.ts +++ b/apps/web/vite.config.ts @@ -23,13 +23,6 @@ export default defineConfig({ "src/**/*.test.ts", "src/**/*.test.tsx", "e2e/**", - "src/main.tsx", - "src/App.tsx", - "src/api/client.ts", - "src/components/SettingsModal.tsx", - "src/components/ErrorBoundary.tsx", - "src/subtitles/**", - "src/shorts/**", ], thresholds: { lines: 100, diff --git a/codecov.yml b/codecov.yml index 496564cb..07be4a1f 100644 --- a/codecov.yml +++ b/codecov.yml @@ -16,29 +16,20 @@ coverage: threshold: 0% ignore: - - "apps/api/**" - - "services/worker/**" - - "packages/media-core/**" - - "scripts/**" - - "apps/api/tests/**" - - "services/worker/test_*.py" - - "packages/media-core/test_*.py" - - "packages/media-core/tests/**" + - ".venv/**" + - "**/__pycache__/**" + - "**/*.pyc" + - ".github/**" + - "docs/**" + - "infra/**" - "apps/web/e2e/**" - - "apps/web/vite.config.ts" - "apps/web/playwright.config.ts" - "apps/web/browserstack.yml" + - "apps/web/src/test/**" - "apps/web/src/**/*.test.ts" - "apps/web/src/**/*.test.tsx" - "apps/desktop/src/**/*.test.ts" - "apps/desktop/vitest.config.ts" - - "apps/web/src/App.tsx" - - "apps/web/src/api/client.ts" - - "apps/web/src/main.tsx" - - "apps/web/src/components/SettingsModal.tsx" - - "apps/web/src/components/ErrorBoundary.tsx" - - "apps/web/src/subtitles/**" - - "apps/web/src/shorts/**" comment: layout: "reach,diff,flags,files" diff --git a/docs/plans/2026-03-04-coverage-desktop-wave-baseline.json b/docs/plans/2026-03-04-coverage-desktop-wave-baseline.json new file mode 100644 index 00000000..6327bebe --- /dev/null +++ b/docs/plans/2026-03-04-coverage-desktop-wave-baseline.json @@ -0,0 +1,159 @@ +{ + "timestamp_utc": 
"2026-03-04T04:16:45.2440247Z", + "branch": "feat/coverage-truth-desktop-product-2026-03-04", + "head_sha": "8db2a7ca6270383ae803d5df33624e36636bb733", + "codecov_ignore_patterns": [ + "- \"apps/api/**\"", + "- \"services/worker/**\"", + "- \"packages/media-core/**\"", + "- \"scripts/**\"", + "- \"apps/api/tests/**\"", + "- \"services/worker/test_*.py\"", + "- \"packages/media-core/test_*.py\"", + "- \"packages/media-core/tests/**\"", + "- \"apps/web/e2e/**\"", + "- \"apps/web/vite.config.ts\"", + "- \"apps/web/playwright.config.ts\"", + "- \"apps/web/browserstack.yml\"", + "- \"apps/web/src/**/*.test.ts\"", + "- \"apps/web/src/**/*.test.tsx\"", + "- \"apps/desktop/src/**/*.test.ts\"", + "- \"apps/desktop/vitest.config.ts\"", + "- \"apps/web/src/App.tsx\"", + "- \"apps/web/src/api/client.ts\"", + "- \"apps/web/src/main.tsx\"", + "- \"apps/web/src/components/SettingsModal.tsx\"", + "- \"apps/web/src/components/ErrorBoundary.tsx\"", + "- \"apps/web/src/subtitles/**\"", + "- \"apps/web/src/shorts/**\"" + ], + "web_coverage_excludes": [ + "\"src/test/**\",", + "\"src/**/*.test.ts\",", + "\"src/**/*.test.tsx\",", + "\"e2e/**\",", + "\"src/main.tsx\",", + "\"src/App.tsx\",", + "\"src/api/client.ts\",", + "\"src/components/SettingsModal.tsx\",", + "\"src/components/ErrorBoundary.tsx\",", + "\"src/subtitles/**\",", + "\"src/shorts/**\"," + ], + "desktop_thresholds": [ + "lines: 100,", + "functions: 100,", + "branches: 0,", + "statements: 100," + ], + "desktop_release": { + "published_at": "2026-03-03T00:41:30Z", + "url": "https://github.com/Prekzursil/Reframe/releases/tag/desktop-v0.1.8", + "prerelease": true, + "tag": "desktop-v0.1.8", + "assets": [ + { + "name": "latest.json", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/latest.json", + "size": 6543, + "downloadCount": 0 + }, + { + "name": "Reframe-0.1.8-1.x86_64.rpm", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe-0.1.8-1.x86_64.rpm", + 
"size": 5697392, + "downloadCount": 0 + }, + { + "name": "Reframe-0.1.8-1.x86_64.rpm.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe-0.1.8-1.x86_64.rpm.sig", + "size": 416, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_aarch64.dmg", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_aarch64.dmg", + "size": 4611944, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_amd64.AppImage", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.AppImage", + "size": 82962936, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_amd64.AppImage.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.AppImage.sig", + "size": 420, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_amd64.deb", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.deb", + "size": 5697476, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_amd64.deb.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.deb.sig", + "size": 412, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_x64-setup.exe", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe", + "size": 2946411, + "downloadCount": 1 + }, + { + "name": "Reframe_0.1.8_x64-setup.exe.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe.sig", + "size": 416, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_x64.dmg", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64.dmg", + "size": 4781642, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_x64_en-US.msi", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi", + 
"size": 4382720, + "downloadCount": 0 + }, + { + "name": "Reframe_0.1.8_x64_en-US.msi.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi.sig", + "size": 416, + "downloadCount": 0 + }, + { + "name": "Reframe_aarch64.app.tar.gz", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_aarch64.app.tar.gz", + "size": 4654498, + "downloadCount": 0 + }, + { + "name": "Reframe_aarch64.app.tar.gz.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_aarch64.app.tar.gz.sig", + "size": 404, + "downloadCount": 0 + }, + { + "name": "Reframe_x64.app.tar.gz", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_x64.app.tar.gz", + "size": 4785003, + "downloadCount": 0 + }, + { + "name": "Reframe_x64.app.tar.gz.sig", + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_x64.app.tar.gz.sig", + "size": 404, + "downloadCount": 0 + } + ] + } +} diff --git a/docs/plans/2026-03-04-coverage-desktop-wave-baseline.md b/docs/plans/2026-03-04-coverage-desktop-wave-baseline.md new file mode 100644 index 00000000..83d17255 --- /dev/null +++ b/docs/plans/2026-03-04-coverage-desktop-wave-baseline.md @@ -0,0 +1,23 @@ +# Coverage + Desktop Wave Baseline (2026-03-04) + +## Branch and Commit +- branch: feat/coverage-truth-desktop-product-2026-03-04 +- head: 8db2a7ca6270383ae803d5df33624e36636bb733 + +## Coverage Truth Baseline +- codecov.yml currently ignored major first-party paths (apps/api/**, services/worker/**, packages/media-core/**, scripts/**) and key web source files. +- apps/web/vite.config.ts currently excluded core product modules from coverage. +- apps/desktop/vitest.config.ts currently had branches: 0 while other thresholds were 100.
+ +## Desktop Release Baseline +- release: desktop-v0.1.8 +- prerelease: True +- published at: 2026-03-03T00:41:30Z +- url: https://github.com/Prekzursil/Reframe/releases/tag/desktop-v0.1.8 +- asset count: 17 + +### Windows assets present +- `Reframe_0.1.8_x64-setup.exe` -> https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe +- `Reframe_0.1.8_x64-setup.exe.sig` -> https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe.sig +- `Reframe_0.1.8_x64_en-US.msi` -> https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi +- `Reframe_0.1.8_x64_en-US.msi.sig` -> https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi.sig diff --git a/docs/plans/2026-03-04-coverage-truth-baseline.json b/docs/plans/2026-03-04-coverage-truth-baseline.json new file mode 100644 index 00000000..50834ef7 --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-baseline.json @@ -0,0 +1,105 @@ +{ + "components": [ + { + "covered": 6627, + "files": 73, + "name": "python", + "path": "coverage\\python-coverage.xml", + "percent": 63.923989582328545, + "total": 10367 + }, + { + "covered": 1475, + "files": 11, + "name": "web", + "path": "apps\\web\\coverage\\lcov.info", + "percent": 65.84821428571429, + "total": 2240 + }, + { + "covered": 197, + "files": 2, + "name": "desktop-ts", + "path": 
"apps\\desktop\\coverage\\lcov.info", + "percent": 100.0, + "total": 197 + } + ], + "findings": [ + "python coverage below 100%: 63.92% (6627/10367)", + "web coverage below 100%: 65.85% (1475/2240)", + "combined coverage below 100%: 64.82% (8299/12804)", + "coverage inventory missing files: 5", + "missing: apps/api/app/__init__.py", + "missing: apps/desktop/src-tauri/src/lib.rs", + "missing: apps/desktop/src-tauri/src/main.rs", + "missing: packages/media-core/src/media_core/__init__.py", + "missing: services/worker/__init__.py", + "coverage inventory uncovered files: 58", + "uncovered: apps/api/app/api.py (997/1443, 69.09%)", + "uncovered: apps/api/app/auth_api.py (436/663, 65.76%)", + "uncovered: apps/api/app/billing.py (19/57, 33.33%)", + "uncovered: apps/api/app/billing_api.py (216/298, 72.48%)", + "uncovered: apps/api/app/cleanup.py (20/28, 71.43%)", + "uncovered: apps/api/app/collaboration_api.py (196/250, 78.40%)", + "uncovered: apps/api/app/errors.py (34/36, 94.44%)", + "uncovered: apps/api/app/identity_api.py (363/495, 73.33%)", + "uncovered: apps/api/app/local_queue.py (23/73, 31.51%)", + "uncovered: apps/api/app/logging_config.py (25/28, 89.29%)", + "uncovered: apps/api/app/main.py (58/63, 92.06%)", + "uncovered: apps/api/app/publish_api.py (185/212, 87.26%)", + "uncovered: apps/api/app/rate_limit.py (37/38, 97.37%)", + "uncovered: apps/api/app/security.py (61/77, 79.22%)", + "uncovered: apps/api/app/share_links.py (49/54, 90.74%)", + "uncovered: apps/api/app/storage.py (82/194, 42.27%)", + "uncovered: apps/web/src/App.tsx (1009/1755, 57.49%)", + "uncovered: apps/web/src/api/client.ts (212/231, 91.77%)", + "uncovered: packages/media-core/src/media_core/diarize/__init__.py (66/203, 32.51%)", + "uncovered: packages/media-core/src/media_core/segment/shorts.py (135/154, 87.66%)", + "uncovered: packages/media-core/src/media_core/subtitles/builder.py (160/193, 82.90%)", + "uncovered: packages/media-core/src/media_core/subtitles/styled.py (57/81, 70.37%)", + 
"uncovered: packages/media-core/src/media_core/subtitles/vtt.py (52/59, 88.14%)", + "uncovered: packages/media-core/src/media_core/transcribe/__main__.py (0/46, 0.00%)", + "uncovered: packages/media-core/src/media_core/transcribe/backends/faster_whisper.py (48/70, 68.57%)", + "uncovered: packages/media-core/src/media_core/transcribe/backends/openai_whisper.py (27/44, 61.36%)", + "uncovered: packages/media-core/src/media_core/transcribe/backends/whisper_cpp.py (34/54, 62.96%)", + "uncovered: packages/media-core/src/media_core/transcribe/backends/whisper_timestamped.py (27/41, 65.85%)", + "uncovered: packages/media-core/src/media_core/transcribe/models.py (36/41, 87.80%)", + "uncovered: packages/media-core/src/media_core/transcribe/path_guard.py (27/32, 84.38%)", + "uncovered: packages/media-core/src/media_core/translate/srt.py (47/52, 90.38%)", + "uncovered: packages/media-core/src/media_core/translate/translator.py (35/46, 76.09%)", + "uncovered: packages/media-core/src/media_core/video_edit/ffmpeg.py (104/130, 80.00%)", + "uncovered: scripts/audit_branch_protection.py (104/140, 74.29%)", + "uncovered: scripts/benchmark_diarization.py (0/119, 0.00%)", + "uncovered: scripts/desktop_updater_e2e.py (96/122, 78.69%)", + "uncovered: scripts/download_whispercpp_model.py (56/84, 66.67%)", + "uncovered: scripts/generate_benchmark_sample.py (44/46, 95.65%)", + "uncovered: scripts/generate_ops_digest.py (136/262, 51.91%)", + "uncovered: scripts/install_argos_pack.py (41/50, 82.00%)", + "uncovered: scripts/prefetch_whisper_model.py (25/28, 89.29%)", + "uncovered: scripts/quality/assert_coverage_100.py (125/314, 39.81%)", + "uncovered: scripts/quality/check_codacy_zero.py (97/150, 64.67%)", + "uncovered: scripts/quality/check_deepscan_zero.py (93/113, 82.30%)", + "uncovered: scripts/quality/check_quality_secrets.py (21/76, 27.63%)", + "uncovered: scripts/quality/check_required_checks.py (85/138, 61.59%)", + "uncovered: scripts/quality/check_sentry_zero.py (90/120, 75.00%)", + 
"uncovered: scripts/quality/check_sonar_zero.py (77/126, 61.11%)", + "uncovered: scripts/quality/check_visual_zero.py (121/163, 74.23%)", + "uncovered: scripts/quality/percy_auto_approve.py (83/129, 64.34%)", + "uncovered: scripts/release_readiness_report.py (39/219, 17.81%)", + "uncovered: scripts/security_helpers.py (25/30, 83.33%)", + "uncovered: scripts/strict23_preflight.py (128/196, 65.31%)", + "uncovered: scripts/upsert_ops_digest_issue.py (29/113, 25.66%)", + "uncovered: scripts/verify_desktop_updater_release.py (67/117, 57.26%)", + "uncovered: scripts/verify_hf_model_access.py (75/109, 68.81%)", + "uncovered: services/worker/groq_client.py (17/48, 35.42%)", + "uncovered: services/worker/worker.py (521/1324, 39.35%)" + ], + "inventory_metrics": { + "expected_files": 83, + "missing_files": 5, + "uncovered_files": 58 + }, + "status": "fail", + "timestamp_utc": "2026-03-04T06:01:58.256856+00:00" +} diff --git a/docs/plans/2026-03-04-coverage-truth-baseline.md b/docs/plans/2026-03-04-coverage-truth-baseline.md new file mode 100644 index 00000000..625043d1 --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-baseline.md @@ -0,0 +1,84 @@ +# Coverage 100 Gate + +- Status: `fail` +- Timestamp (UTC): `2026-03-04T06:01:58.256856+00:00` + +## Components +- `python`: `63.92%` (6627/10367) from `coverage\python-coverage.xml` +- `web`: `65.85%` (1475/2240) from `apps\web\coverage\lcov.info` +- `desktop-ts`: `100.00%` (197/197) from `apps\desktop\coverage\lcov.info` + +## Inventory +- expected_files: `83` +- missing_files: `5` +- uncovered_files: `58` + +## Findings +- python coverage below 100%: 63.92% (6627/10367) +- web coverage below 100%: 65.85% (1475/2240) +- combined coverage below 100%: 64.82% (8299/12804) +- coverage inventory missing files: 5 +- missing: apps/api/app/__init__.py +- missing: apps/desktop/src-tauri/src/lib.rs +- missing: apps/desktop/src-tauri/src/main.rs +- missing: packages/media-core/src/media_core/__init__.py +- missing: 
services/worker/__init__.py +- coverage inventory uncovered files: 58 +- uncovered: apps/api/app/api.py (997/1443, 69.09%) +- uncovered: apps/api/app/auth_api.py (436/663, 65.76%) +- uncovered: apps/api/app/billing.py (19/57, 33.33%) +- uncovered: apps/api/app/billing_api.py (216/298, 72.48%) +- uncovered: apps/api/app/cleanup.py (20/28, 71.43%) +- uncovered: apps/api/app/collaboration_api.py (196/250, 78.40%) +- uncovered: apps/api/app/errors.py (34/36, 94.44%) +- uncovered: apps/api/app/identity_api.py (363/495, 73.33%) +- uncovered: apps/api/app/local_queue.py (23/73, 31.51%) +- uncovered: apps/api/app/logging_config.py (25/28, 89.29%) +- uncovered: apps/api/app/main.py (58/63, 92.06%) +- uncovered: apps/api/app/publish_api.py (185/212, 87.26%) +- uncovered: apps/api/app/rate_limit.py (37/38, 97.37%) +- uncovered: apps/api/app/security.py (61/77, 79.22%) +- uncovered: apps/api/app/share_links.py (49/54, 90.74%) +- uncovered: apps/api/app/storage.py (82/194, 42.27%) +- uncovered: apps/web/src/App.tsx (1009/1755, 57.49%) +- uncovered: apps/web/src/api/client.ts (212/231, 91.77%) +- uncovered: packages/media-core/src/media_core/diarize/__init__.py (66/203, 32.51%) +- uncovered: packages/media-core/src/media_core/segment/shorts.py (135/154, 87.66%) +- uncovered: packages/media-core/src/media_core/subtitles/builder.py (160/193, 82.90%) +- uncovered: packages/media-core/src/media_core/subtitles/styled.py (57/81, 70.37%) +- uncovered: packages/media-core/src/media_core/subtitles/vtt.py (52/59, 88.14%) +- uncovered: packages/media-core/src/media_core/transcribe/__main__.py (0/46, 0.00%) +- uncovered: packages/media-core/src/media_core/transcribe/backends/faster_whisper.py (48/70, 68.57%) +- uncovered: packages/media-core/src/media_core/transcribe/backends/openai_whisper.py (27/44, 61.36%) +- uncovered: packages/media-core/src/media_core/transcribe/backends/whisper_cpp.py (34/54, 62.96%) +- uncovered: 
packages/media-core/src/media_core/transcribe/backends/whisper_timestamped.py (27/41, 65.85%) +- uncovered: packages/media-core/src/media_core/transcribe/models.py (36/41, 87.80%) +- uncovered: packages/media-core/src/media_core/transcribe/path_guard.py (27/32, 84.38%) +- uncovered: packages/media-core/src/media_core/translate/srt.py (47/52, 90.38%) +- uncovered: packages/media-core/src/media_core/translate/translator.py (35/46, 76.09%) +- uncovered: packages/media-core/src/media_core/video_edit/ffmpeg.py (104/130, 80.00%) +- uncovered: scripts/audit_branch_protection.py (104/140, 74.29%) +- uncovered: scripts/benchmark_diarization.py (0/119, 0.00%) +- uncovered: scripts/desktop_updater_e2e.py (96/122, 78.69%) +- uncovered: scripts/download_whispercpp_model.py (56/84, 66.67%) +- uncovered: scripts/generate_benchmark_sample.py (44/46, 95.65%) +- uncovered: scripts/generate_ops_digest.py (136/262, 51.91%) +- uncovered: scripts/install_argos_pack.py (41/50, 82.00%) +- uncovered: scripts/prefetch_whisper_model.py (25/28, 89.29%) +- uncovered: scripts/quality/assert_coverage_100.py (125/314, 39.81%) +- uncovered: scripts/quality/check_codacy_zero.py (97/150, 64.67%) +- uncovered: scripts/quality/check_deepscan_zero.py (93/113, 82.30%) +- uncovered: scripts/quality/check_quality_secrets.py (21/76, 27.63%) +- uncovered: scripts/quality/check_required_checks.py (85/138, 61.59%) +- uncovered: scripts/quality/check_sentry_zero.py (90/120, 75.00%) +- uncovered: scripts/quality/check_sonar_zero.py (77/126, 61.11%) +- uncovered: scripts/quality/check_visual_zero.py (121/163, 74.23%) +- uncovered: scripts/quality/percy_auto_approve.py (83/129, 64.34%) +- uncovered: scripts/release_readiness_report.py (39/219, 17.81%) +- uncovered: scripts/security_helpers.py (25/30, 83.33%) +- uncovered: scripts/strict23_preflight.py (128/196, 65.31%) +- uncovered: scripts/upsert_ops_digest_issue.py (29/113, 25.66%) +- uncovered: scripts/verify_desktop_updater_release.py (67/117, 57.26%) +- 
uncovered: scripts/verify_hf_model_access.py (75/109, 68.81%) +- uncovered: services/worker/groq_client.py (17/48, 35.42%) +- uncovered: services/worker/worker.py (521/1324, 39.35%) diff --git a/docs/plans/2026-03-04-coverage-truth-desktop-baseline.json b/docs/plans/2026-03-04-coverage-truth-desktop-baseline.json new file mode 100644 index 00000000..2c1454f8 --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-desktop-baseline.json @@ -0,0 +1,343 @@ +{ + "branch": "feat/coverage-truth-desktop-product-2026-03-04", + "coverage_baseline": { + "codecov_ignore": [ + ".venv/**", + "**/__pycache__/**", + "**/*.pyc", + ".github/**", + "docs/**", + "infra/**", + "apps/web/e2e/**", + "apps/web/playwright.config.ts", + "apps/web/browserstack.yml", + "apps/web/src/test/**", + "apps/web/src/**/*.test.ts", + "apps/web/src/**/*.test.tsx", + "apps/desktop/src/**/*.test.ts", + "apps/desktop/vitest.config.ts" + ], + "desktop_has_100_thresholds": true, + "desktop_vitest_file": "apps/desktop/vitest.config.ts", + "web_has_100_thresholds": true, + "web_includes_all_src": true, + "web_vitest_file": "apps/web/vite.config.ts" + }, + "desktop_release_baseline": { + "asset_count": 17, + "assets": [ + { + "download_count": 0, + "name": "latest.json", + "size": 6543, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/latest.json" + }, + { + "download_count": 0, + "name": "Reframe-0.1.8-1.x86_64.rpm", + "size": 5697392, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe-0.1.8-1.x86_64.rpm" + }, + { + "download_count": 0, + "name": "Reframe-0.1.8-1.x86_64.rpm.sig", + "size": 416, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe-0.1.8-1.x86_64.rpm.sig" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_aarch64.dmg", + "size": 4611944, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_aarch64.dmg" + }, + { + "download_count": 0, + 
"name": "Reframe_0.1.8_amd64.AppImage", + "size": 82962936, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.AppImage" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_amd64.AppImage.sig", + "size": 420, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.AppImage.sig" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_amd64.deb", + "size": 5697476, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.deb" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_amd64.deb.sig", + "size": 412, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_amd64.deb.sig" + }, + { + "download_count": 1, + "name": "Reframe_0.1.8_x64-setup.exe", + "size": 2946411, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_x64-setup.exe.sig", + "size": 416, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64-setup.exe.sig" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_x64.dmg", + "size": 4781642, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64.dmg" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_x64_en-US.msi", + "size": 4382720, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi" + }, + { + "download_count": 0, + "name": "Reframe_0.1.8_x64_en-US.msi.sig", + "size": 416, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_0.1.8_x64_en-US.msi.sig" + }, + { + "download_count": 0, + "name": "Reframe_aarch64.app.tar.gz", + "size": 4654498, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_aarch64.app.tar.gz" + }, + { + 
"download_count": 0, + "name": "Reframe_aarch64.app.tar.gz.sig", + "size": 404, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_aarch64.app.tar.gz.sig" + }, + { + "download_count": 0, + "name": "Reframe_x64.app.tar.gz", + "size": 4785003, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_x64.app.tar.gz" + }, + { + "download_count": 0, + "name": "Reframe_x64.app.tar.gz.sig", + "size": 404, + "url": "https://github.com/Prekzursil/Reframe/releases/download/desktop-v0.1.8/Reframe_x64.app.tar.gz.sig" + } + ], + "is_prerelease": true, + "name": "Reframe Desktop v0.1.8", + "published_at": "2026-03-03T00:41:30Z", + "tag": "desktop-v0.1.8", + "url": "https://github.com/Prekzursil/Reframe/releases/tag/desktop-v0.1.8" + }, + "head_sha": "a2af872d2f9a9bd9a143f753693b7f3730fb48b4", + "pr": { + "check_summary": { + "FAILURE": 3, + "NONE": 3, + "SUCCESS": 21 + }, + "checks": [ + { + "conclusion": "FAILURE", + "name": "Codecov Analytics", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757675/job/65680193917", + "workflow": "Codecov Analytics" + }, + { + "conclusion": "FAILURE", + "name": "Coverage 100 Gate", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757757/job/65680193766", + "workflow": "Coverage 100" + }, + { + "conclusion": "FAILURE", + "name": "Sonar Zero", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757682/job/65680193783", + "workflow": "Sonar Zero" + }, + { + "conclusion": "", + "name": "BrowserStack E2E", + "status": "IN_PROGRESS", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757656/job/65680193822", + "workflow": "BrowserStack E2E" + }, + { + "conclusion": "", + "name": "Percy Visual", + "status": "IN_PROGRESS", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757659/job/65680194046", + "workflow": "Percy Visual" + }, 
+ { + "conclusion": "SUCCESS", + "name": "Applitools Visual", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757738/job/65680193777", + "workflow": "Applitools Visual" + }, + { + "conclusion": "SUCCESS", + "name": "Python API & worker checks", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757662/job/65680194026", + "workflow": "CI" + }, + { + "conclusion": "SUCCESS", + "name": "Codacy Zero", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757667/job/65680193907", + "workflow": "Codacy Zero" + }, + { + "conclusion": "SUCCESS", + "name": "Analyze (actions)", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757708/job/65680193825", + "workflow": "CodeQL" + }, + { + "conclusion": "SUCCESS", + "name": "DeepScan Zero", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757704/job/65680193927", + "workflow": "DeepScan Zero" + }, + { + "conclusion": "SUCCESS", + "name": "Python dependency audit", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757680/job/65680193902", + "workflow": "Dependency Audit" + }, + { + "conclusion": "SUCCESS", + "name": "Quality Secrets Preflight", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757718/job/65680193926", + "workflow": "Quality Zero Gate" + }, + { + "conclusion": "SUCCESS", + "name": "Sentry Zero", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757668/job/65680193863", + "workflow": "Sentry Zero" + }, + { + "conclusion": "SUCCESS", + "name": "Snyk Zero", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757664/job/65680193797", + "workflow": "Snyk Zero" + }, + { + "conclusion": "SUCCESS", + "name": "preflight", + "status": "COMPLETED", + "url": 
"https://github.com/Prekzursil/Reframe/actions/runs/22660757670/job/65680193928", + "workflow": "strict-23 Preflight" + }, + { + "conclusion": "SUCCESS", + "name": "Analyze (javascript-typescript)", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757708/job/65680193820", + "workflow": "CodeQL" + }, + { + "conclusion": "SUCCESS", + "name": "Analyze (python)", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757708/job/65680193849", + "workflow": "CodeQL" + }, + { + "conclusion": "", + "name": "Quality Zero Gate", + "status": "IN_PROGRESS", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757718/job/65680207233", + "workflow": "Quality Zero Gate" + }, + { + "conclusion": "SUCCESS", + "name": "Web build", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757662/job/65680194062", + "workflow": "CI" + }, + { + "conclusion": "SUCCESS", + "name": "Node dependency audit", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/actions/runs/22660757680/job/65680193892", + "workflow": "Dependency Audit" + }, + { + "conclusion": "SUCCESS", + "name": "Codacy Static Code Analysis", + "status": "COMPLETED", + "url": "https://app.codacy.com/gh/Prekzursil/Reframe/pull-requests/107", + "workflow": "" + }, + { + "conclusion": "SUCCESS", + "name": "CodeQL", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/runs/65680282135", + "workflow": "" + }, + { + "conclusion": "SUCCESS", + "name": "SonarCloud", + "status": "COMPLETED", + "url": "https://github.com/Prekzursil/Reframe/runs/65680534691", + "workflow": "" + }, + { + "conclusion": "SUCCESS", + "name": "CodeRabbit", + "status": "COMPLETED", + "url": "", + "workflow": "status-context" + }, + { + "conclusion": "SUCCESS", + "name": "DeepScan", + "status": "COMPLETED", + "url": 
"https://deepscan.io/dashboard/#view=project&tid=29074&pid=31131&bid=1008125&subview=pull-request&prid=2294585", + "workflow": "status-context" + }, + { + "conclusion": "SUCCESS", + "name": "SonarCloud Code Analysis", + "status": "COMPLETED", + "url": "https://sonarcloud.io", + "workflow": "" + }, + { + "conclusion": "SUCCESS", + "name": "code/snyk (prekzursil1993)", + "status": "COMPLETED", + "url": "", + "workflow": "status-context" + } + ], + "head_ref": "feat/coverage-truth-desktop-product-2026-03-04", + "head_sha": "a2af872d2f9a9bd9a143f753693b7f3730fb48b4", + "number": 107, + "state": "OPEN", + "url": "https://github.com/Prekzursil/Reframe/pull/107" + }, + "timestamp_utc": "2026-03-04T08:19:25.461993+00:00" +} diff --git a/docs/plans/2026-03-04-coverage-truth-desktop-baseline.md b/docs/plans/2026-03-04-coverage-truth-desktop-baseline.md new file mode 100644 index 00000000..e09e592d --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-desktop-baseline.md @@ -0,0 +1,47 @@ +# 2026-03-04 Coverage Truth + Desktop Baseline + +- Timestamp (UTC): `2026-03-04T08:19:25.461993+00:00` +- Branch: `feat/coverage-truth-desktop-product-2026-03-04` +- Head SHA: `a2af872d2f9a9bd9a143f753693b7f3730fb48b4` +- PR: https://github.com/Prekzursil/Reframe/pull/107 + +## PR Check Snapshot +- Failures: `3` +- In progress: `3` +- FAIL `Codecov Analytics` (Codecov Analytics) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757675/job/65680193917 +- FAIL `Coverage 100 Gate` (Coverage 100) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757757/job/65680193766 +- FAIL `Sonar Zero` (Sonar Zero) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757682/job/65680193783 +- In-progress contexts: + - `BrowserStack E2E` (BrowserStack E2E) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757656/job/65680193822 + - `Percy Visual` (Percy Visual) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757659/job/65680194046 + - `Quality Zero Gate` (Quality 
Zero Gate) -> https://github.com/Prekzursil/Reframe/actions/runs/22660757718/job/65680207233 + +## Coverage Config Baseline +- Current `codecov.yml` ignore list: + - `.venv/**` + - `**/__pycache__/**` + - `**/*.pyc` + - `.github/**` + - `docs/**` + - `infra/**` + - `apps/web/e2e/**` + - `apps/web/playwright.config.ts` + - `apps/web/browserstack.yml` + - `apps/web/src/test/**` + - `apps/web/src/**/*.test.ts` + - `apps/web/src/**/*.test.tsx` + - `apps/desktop/src/**/*.test.ts` + - `apps/desktop/vitest.config.ts` +- Web Vitest has strict 100 thresholds: `True` +- Desktop Vitest has strict 100 thresholds: `True` +- Web coverage include is full src glob: `True` + +## Desktop Release Baseline +- Release tag: `desktop-v0.1.8` +- Release URL: https://github.com/Prekzursil/Reframe/releases/tag/desktop-v0.1.8 +- Pre-release: `True` +- Published at: `2026-03-03T00:41:30Z` +- Asset count: `17` +- Key Windows assets: + - `Reframe_0.1.8_x64-setup.exe` (2946411 bytes, downloads=1) + - `Reframe_0.1.8_x64_en-US.msi` (4382720 bytes, downloads=0) diff --git a/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.json b/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.json new file mode 100644 index 00000000..2c516f50 --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.json @@ -0,0 +1,31 @@ +{ + "captured_at_utc": "2026-03-04T06:02:10.4850475Z", + "branch": "feat/coverage-truth-desktop-product-2026-03-04", + "head_sha": "4f7349caf2966f25902a72e8384cdd43a9b2a65e", + "pr": "https://github.com/Prekzursil/Reframe/pull/107", + "coverage": { + "python_percent": 63.92, + "web_percent": 65.85, + "desktop_ts_percent": 100.0, + "combined_percent": 64.82, + "expected_files": 83, + "missing_files": 5, + "uncovered_files": 58, + "source_md": "docs/plans/2026-03-04-coverage-truth-baseline.md", + "source_json": "docs/plans/2026-03-04-coverage-truth-baseline.json" + }, + "release": { + "name": "Reframe Desktop v0.1.8", + "tag": "desktop-v0.1.8", + 
"prerelease": true, + "url": "https://github.com/Prekzursil/Reframe/releases/tag/desktop-v0.1.8", + "windows_assets": [ + "Reframe_0.1.8_x64-setup.exe", + "Reframe_0.1.8_x64_en-US.msi" + ] + }, + "notes": [ + "Coverage denominator excludes were already tightened relative to pre-104 state.", + "Desktop remains local-runtime capable but UX still exposes operator-first terminology." + ] +} \ No newline at end of file diff --git a/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.md b/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.md new file mode 100644 index 00000000..d3790f2a --- /dev/null +++ b/docs/plans/2026-03-04-coverage-truth-desktop-release-baseline.md @@ -0,0 +1,36 @@ +# Coverage Truth + Desktop Product Baseline (2026-03-04) + +## Branch +- branch: `feat/coverage-truth-desktop-product-2026-03-04` +- head: `d8e1556` +- base main: `8db2a7c` + +## Coverage truth baseline +Source: `coverage-100/coverage.local.wave3.json` + +- python: `78.18%` (`8831/11296`) +- web: `83.85%` (`1884/2247`) +- desktop-ts: `100.00%` (`209/209`) +- combined: `79.44%` (`10924/13752`) + +Inventory findings: +- expected files: `80` +- missing files: `2` (`apps/desktop/src-tauri/src/lib.rs`, `apps/desktop/src-tauri/src/main.rs`) +- uncovered files: `57` + +Largest hotspots by line volume: +- `apps/web/src/App.tsx` -> `1418/1762` +- `services/worker/worker.py` -> `913/1326` +- `apps/api/app/api.py` -> `1068/1482` +- `apps/api/app/auth_api.py` -> `436/663` +- `apps/api/app/identity_api.py` -> `363/495` + +## Config posture (truth-restored) +- `codecov.yml` no longer ignores first-party app/runtime trees. +- `apps/web/vite.config.ts` tracks all `src/**/*.ts(x)` except test/bootstrap files. +- `apps/desktop/vitest.config.ts` thresholds are strict `100/100/100/100`. +- `scripts/quality/assert_coverage_100.py` enforces tracked-file inventory presence and uncovered-file diagnostics. 
+ +## Desktop runtime posture baseline +- Desktop app currently uses bundled local runtime bootstrap (`REFRAME_LOCAL_QUEUE_MODE=true`), no Docker dependency for runtime path. +- UX is functional but still diagnostics-heavy; next wave will streamline first-run product flow and guided creation path. diff --git a/packages/media-core/src/media_core/diarize/__init__.py b/packages/media-core/src/media_core/diarize/__init__.py index 5639631c..19834150 100644 --- a/packages/media-core/src/media_core/diarize/__init__.py +++ b/packages/media-core/src/media_core/diarize/__init__.py @@ -2,6 +2,7 @@ import os from collections.abc import Iterator +from dataclasses import replace from pathlib import Path from typing import Any, Iterable, List, Sequence @@ -295,7 +296,7 @@ def ensure_local_hf_snapshot(repo_id: str) -> Path: for (start, end), cluster_idx in zip(speech_regions, assignments): speaker = f"SPEAKER_{cluster_idx:02d}" if segments and segments[-1].speaker == speaker and start <= (segments[-1].end + merge_gap_seconds): - segments[-1].end = max(segments[-1].end, end) + segments[-1] = replace(segments[-1], end=max(segments[-1].end, end)) continue segments.append(SpeakerSegment(start=start, end=end, speaker=speaker)) diff --git a/packages/media-core/tests/test_diarize_wave.py b/packages/media-core/tests/test_diarize_wave.py new file mode 100644 index 00000000..0b69ecf4 --- /dev/null +++ b/packages/media-core/tests/test_diarize_wave.py @@ -0,0 +1,427 @@ +from __future__ import absolute_import, division + +import sys +import types +from pathlib import Path + +import pytest + +from media_core.diarize import ( + _diarize_pyannote, + _diarize_speechbrain, + _iter_pyannote_tracks, + assign_speakers_to_lines, + diarize_audio, +) +from media_core.diarize.config import DiarizationBackend, DiarizationConfig +from media_core.diarize.models import SpeakerSegment +from media_core.subtitles.builder import SubtitleLine +from media_core.transcribe.models import Word + + +def 
_expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +class _FakeTurn: + def __init__(self, start: float, end: float): + self.start = start + self.end = end + + +class _FakeTracks: + def __init__(self, rows): + self._rows = rows + + def itertracks(self, yield_label: bool = True): + _ = yield_label + return iter(self._rows) + + +class _FakeBoundary: + def __init__(self, values): + self._values = list(values) + + def detach(self): + return self + + def cpu(self): + return self + + def tolist(self): + return list(self._values) + + +class _FakeScalar: + def __init__(self, value: float): + self._value = value + + def item(self): + return self._value + + +class _FakeTensor: + def __init__(self, data): + self.data = data + + @property + def ndim(self): + if isinstance(self.data, list) and self.data and isinstance(self.data[0], list): + return 2 + return 1 + + @property + def shape(self): + if self.ndim == 2: + return (len(self.data), len(self.data[0]) if self.data[0] else 0) + return (len(self.data), 0) + + def __getitem__(self, key): + if isinstance(key, tuple): + row_sel, col_sel = key + if row_sel == slice(None): + rows = self.data + else: + rows = [self.data[row_sel]] + if isinstance(col_sel, slice): + cols = [row[col_sel] for row in rows] + else: + cols = [[row[col_sel]] for row in rows] + return _FakeTensor(cols) + if self.ndim == 2: + return _FakeTensor(self.data[key]) + return self.data[key] + + def to(self, _dtype): + return self + + def detach(self): + return self + + def mean(self, dim=0, keepdim=False): + if self.ndim != 2: + return self + if dim != 0: + raise ValueError("fake tensor only supports dim=0") + cols = len(self.data[0]) if self.data else 0 + avg = [] + for idx in range(cols): + avg.append(sum(row[idx] for row in self.data) / max(len(self.data), 1)) + if keepdim: + return _FakeTensor([avg]) + return _FakeTensor(avg) + + def _binary_op_2d_matrix(self, other_rows, op): + rows = [] + for left_row, 
right_row in zip(self.data, other_rows): + rows.append([op(lv, rv) for lv, rv in zip(left_row, right_row)]) + return _FakeTensor(rows) + + def _binary_op_2d_scalar(self, other_value, op): + return _FakeTensor([[op(v, other_value) for v in row] for row in self.data]) + + def _binary_op_2d(self, other_data, op): + if isinstance(other_data, list) and other_data and isinstance(other_data[0], list): + return self._binary_op_2d_matrix(other_data, op) + return self._binary_op_2d_scalar(other_data, op) + + def _binary_op_1d(self, other_data, op): + if isinstance(other_data, list): + return _FakeTensor([op(lv, rv) for lv, rv in zip(self.data, other_data)]) + return _FakeTensor([op(v, other_data) for v in self.data]) + + def _binary_op(self, other, op): + other_data = other.data if isinstance(other, _FakeTensor) else other + if self.ndim == 2: + return self._binary_op_2d(other_data, op) + return self._binary_op_1d(other_data, op) + + def __mul__(self, other): + return self._binary_op(other, lambda a, b: a * b) + + def __rmul__(self, other): + return self.__mul__(other) + + def __add__(self, other): + return self._binary_op(other, lambda a, b: a + b) + + def __truediv__(self, other): + return self._binary_op(other, lambda a, b: a / b) + + +class _FakeNumpyLike: + def __init__(self, values): + self._values = values + + @property + def T(self): + return [list(row) for row in zip(*self._values)] + + +def _install_fake_pyannote(monkeypatch, *, pipeline_cls): + pkg = types.ModuleType("pyannote") + audio = types.ModuleType("pyannote.audio") + audio.Pipeline = pipeline_cls + monkeypatch.setitem(sys.modules, "pyannote", pkg) + monkeypatch.setitem(sys.modules, "pyannote.audio", audio) + + +def _install_fake_huggingface_hub(monkeypatch, tmp_path: Path): + hub = types.ModuleType("huggingface_hub") + + def hf_hub_download(*_args, token=None, **_kwargs): + return token + + def snapshot_download(repo_id: str, local_dir: str, local_dir_use_symlinks: bool = False): + _ = (repo_id, 
local_dir_use_symlinks) + p = Path(local_dir) + p.mkdir(parents=True, exist_ok=True) + with open(str(p / "hyperparams.yaml"), "w", encoding="utf-8") as handle: + handle.write("ok: true\\n") + return str(p) + + setattr(hub, "hf_hub_download", hf_hub_download) + setattr(hub, "snapshot_download", snapshot_download) + monkeypatch.setitem(sys.modules, "huggingface_hub", hub) + + +def _install_fake_torch(monkeypatch): + fake_torch = types.ModuleType("torch") + setattr(fake_torch, "float32", "float32") + + def from_numpy(values): + return _FakeTensor(values) + + setattr(fake_torch, "from_numpy", from_numpy) + + functional = types.ModuleType("torch.nn.functional") + + def normalize(tensor, dim=0): + _ = dim + return tensor + + def cosine_similarity(_left, _right, dim=0): + _ = dim + return _FakeScalar(0.9) + + setattr(functional, "normalize", normalize) + setattr(functional, "cosine_similarity", cosine_similarity) + + fake_torch_nn = types.ModuleType("torch.nn") + setattr(fake_torch_nn, "functional", functional) + + monkeypatch.setitem(sys.modules, "torch", fake_torch) + monkeypatch.setitem(sys.modules, "torch.nn", fake_torch_nn) + monkeypatch.setitem(sys.modules, "torch.nn.functional", functional) + + +def _install_fake_torchaudio(monkeypatch, *, torchaudio_fails: bool): + torchaudio = types.ModuleType("torchaudio") + + def load(_path): + if torchaudio_fails: + raise RuntimeError("torchaudio failure") + return _FakeTensor([[0.1, 0.2, 0.3, 0.4]]), 10 + + setattr(torchaudio, "load", load) + monkeypatch.setitem(sys.modules, "torchaudio", torchaudio) + + if not torchaudio_fails: + return + + sf = types.ModuleType("soundfile") + + def read(_path, dtype=None, always_2d=False): + _ = (dtype, always_2d) + return _FakeNumpyLike([[0.1], [0.2], [0.3], [0.4]]), 10 + + setattr(sf, "read", read) + monkeypatch.setitem(sys.modules, "soundfile", sf) + + +def _install_fake_speechbrain_interfaces(monkeypatch, *, use_pretrained: bool): + class FakeVAD: + @classmethod + def from_hparams(cls, 
*args, **kwargs): + _ = (args, kwargs) + return cls() + + def get_speech_segments(self, wav): + _ = (self, wav) + # Match SpeechBrain VAD output shape expected by _diarize_speechbrain: + # flat boundary tensor [start0, end0, start1, end1, ...] + return _FakeBoundary([0.0, 0.2, 0.2, 0.4]) + + class FakeSpeakerRecognition: + @classmethod + def from_hparams(cls, *args, **kwargs): + _ = (args, kwargs) + return cls() + + def encode_batch(self, _tensor): + _ = self + return _FakeTensor([0.6, 0.4]) + + utils_fetching = types.ModuleType("speechbrain.utils.fetching") + setattr(utils_fetching, "LocalStrategy", types.SimpleNamespace(NO_LINK="NO_LINK")) + monkeypatch.setitem(sys.modules, "speechbrain.utils.fetching", utils_fetching) + + if use_pretrained: + pretrained = types.ModuleType("speechbrain.pretrained") + setattr(pretrained, "VAD", FakeVAD) + setattr(pretrained, "SpeakerRecognition", FakeSpeakerRecognition) + monkeypatch.setitem(sys.modules, "speechbrain.pretrained", pretrained) + monkeypatch.delitem(sys.modules, "speechbrain.inference.VAD", raising=False) + monkeypatch.delitem(sys.modules, "speechbrain.inference.speaker", raising=False) + return + + vad_mod = types.ModuleType("speechbrain.inference.VAD") + setattr(vad_mod, "VAD", FakeVAD) + spk_mod = types.ModuleType("speechbrain.inference.speaker") + setattr(spk_mod, "SpeakerRecognition", FakeSpeakerRecognition) + monkeypatch.setitem(sys.modules, "speechbrain.inference.VAD", vad_mod) + monkeypatch.setitem(sys.modules, "speechbrain.inference.speaker", spk_mod) + + +def _install_fake_speechbrain(monkeypatch, tmp_path: Path, *, use_pretrained: bool = False, torchaudio_fails: bool = False): + _install_fake_huggingface_hub(monkeypatch, tmp_path) + _install_fake_torch(monkeypatch) + _install_fake_torchaudio(monkeypatch, torchaudio_fails=torchaudio_fails) + _install_fake_speechbrain_interfaces(monkeypatch, use_pretrained=use_pretrained) + +def test_diarize_audio_noop_and_unknown_backend(): + cfg = 
DiarizationConfig(backend=DiarizationBackend.NOOP) + _expect(diarize_audio("audio.wav", cfg) == [], "Expected NOOP backend to yield no segments") + + cfg_unknown = types.SimpleNamespace(backend="unknown") + with pytest.raises(ValueError): + diarize_audio("audio.wav", cfg_unknown) + + +def test_assign_speakers_to_lines_with_and_without_segments(): + word = Word(start=0.0, end=0.2, text="hi") + lines = [SubtitleLine(start=0.0, end=0.5, words=[word]), SubtitleLine(start=1.0, end=1.5, words=[word])] + + copied = assign_speakers_to_lines(lines, []) + _expect(copied[0].speaker is None, "Expected no speaker when no segments are provided") + + segments = [ + SpeakerSegment(start=0.0, end=0.4, speaker="SPEAKER_00"), + SpeakerSegment(start=1.0, end=1.4, speaker="SPEAKER_01"), + ] + assigned = assign_speakers_to_lines(lines, segments) + _expect(assigned[0].speaker == "SPEAKER_00", "Expected first line speaker assignment") + _expect(assigned[1].speaker == "SPEAKER_01", "Expected second line speaker assignment") + + +def test_iter_pyannote_tracks_supports_multiple_shapes(): + direct = _FakeTracks([(_FakeTurn(0.0, 1.0), None, "A")]) + _expect(bool(list(_iter_pyannote_tracks(direct))), "Expected direct itertracks support") + + nested = types.SimpleNamespace(speaker_diarization=_FakeTracks([(_FakeTurn(0.0, 1.0), None, "B")])) + _expect(bool(list(_iter_pyannote_tracks(nested))), "Expected nested speaker_diarization support") + + annotation_obj = _FakeTracks([(_FakeTurn(0.0, 1.0), None, "C")]) + + def _to_annotation(): + return annotation_obj + + to_annotation = types.SimpleNamespace(to_annotation=_to_annotation) + _expect(bool(list(_iter_pyannote_tracks(to_annotation))), "Expected to_annotation fallback support") + + with pytest.raises(RuntimeError): + list(_iter_pyannote_tracks(object())) + + +def test_diarize_pyannote_import_error_and_gated_hint(monkeypatch): + monkeypatch.delitem(sys.modules, "pyannote", raising=False) + monkeypatch.delitem(sys.modules, "pyannote.audio", 
raising=False) + + cfg = DiarizationConfig(backend=DiarizationBackend.PYANNOTE, model="pyannote/speaker-diarization-3.1") + with pytest.raises(RuntimeError): + _diarize_pyannote("audio.wav", cfg) + + class FailingPipeline: + @classmethod + def from_pretrained(cls, *_args, **_kwargs): + raise RuntimeError("403 gated") + + _install_fake_pyannote(monkeypatch, pipeline_cls=FailingPipeline) + with pytest.raises(RuntimeError) as exc: + _diarize_pyannote("audio.wav", cfg) + _expect("Hint:" in str(exc.value), "Expected gated-access hint in pyannote failure") + + +def test_diarize_pyannote_token_fallback_and_segment_filter(monkeypatch): + calls = [] + + class FakePipeline: + @classmethod + def from_pretrained(cls, model, token=None, use_auth_token=None): + calls.append((model, token, use_auth_token)) + if token is not None: + raise TypeError("token kw not supported") + return cls() + + def __call__(self, _path): + return _FakeTracks([ + (_FakeTurn(0.0, 0.1), None, "A"), + (_FakeTurn(0.1, 0.6), None, "B"), + ]) + + _install_fake_pyannote(monkeypatch, pipeline_cls=FakePipeline) + cfg = DiarizationConfig( + backend=DiarizationBackend.PYANNOTE, + model="pyannote/model", + huggingface_token="-".join(["fixture", "value"]), + min_segment_duration=0.2, + ) + + segments = _diarize_pyannote("audio.wav", cfg) + _expect(len(segments) == 1, "Expected min-segment-duration filtering to retain one segment") + _expect(segments[0].speaker == "B", "Expected retained segment speaker label") + _expect(any(call[1] == "fixture-value" for call in calls), "Expected token kwarg fallback to include fixture token") + _expect(any(call[2] == "fixture-value" for call in calls), "Expected use_auth_token fallback to include fixture token") + + +def test_diarize_speechbrain_import_error(monkeypatch): + monkeypatch.delitem(sys.modules, "torch", raising=False) + monkeypatch.delitem(sys.modules, "torchaudio", raising=False) + + original_import = __import__ + + def fake_import(name, *args, **kwargs): + if name 
in {"torch", "torchaudio"}: + raise ImportError("missing") + return original_import(name, *args, **kwargs) + + monkeypatch.setattr("builtins.__import__", fake_import) + cfg = DiarizationConfig(backend=DiarizationBackend.SPEECHBRAIN, model="speechbrain/model") + with pytest.raises(RuntimeError): + _diarize_speechbrain("audio.wav", cfg) + + +def test_diarize_speechbrain_main_path_and_pretrained_fallback(monkeypatch, tmp_path): + cfg = DiarizationConfig( + backend=DiarizationBackend.SPEECHBRAIN, + model="speechbrain/spkrec-ecapa-voxceleb", + min_segment_duration=0.05, + ) + + _install_fake_speechbrain(monkeypatch, tmp_path, use_pretrained=False, torchaudio_fails=False) + segments = _diarize_speechbrain("audio.wav", cfg) + _expect(bool(segments), "Expected non-empty speechbrain segments") + _expect(segments[0].speaker.startswith("SPEAKER_"), "Expected synthetic speaker labels") + + _install_fake_speechbrain(monkeypatch, tmp_path, use_pretrained=True, torchaudio_fails=False) + segments_fallback = _diarize_speechbrain("audio.wav", cfg) + _expect(bool(segments_fallback), "Expected pretrained fallback to produce segments") + + +def test_diarize_speechbrain_torchaudio_failure_uses_soundfile(monkeypatch, tmp_path): + cfg = DiarizationConfig(backend=DiarizationBackend.SPEECHBRAIN, model="speechbrain/spkrec-ecapa-voxceleb") + _install_fake_speechbrain(monkeypatch, tmp_path, use_pretrained=False, torchaudio_fails=True) + segments = _diarize_speechbrain("audio.wav", cfg) + _expect(bool(segments), "Expected non-empty speechbrain segments") + diff --git a/packages/media-core/tests/test_package_init.py b/packages/media-core/tests/test_package_init.py new file mode 100644 index 00000000..8fb241b4 --- /dev/null +++ b/packages/media-core/tests/test_package_init.py @@ -0,0 +1,10 @@ +from __future__ import absolute_import + + +def test_media_core_package_init_exports_all(): + import media_core + + if not hasattr(media_core, "__all__"): + raise AssertionError("media_core must define 
__all__") + if not isinstance(media_core.__all__, list): + raise AssertionError("media_core.__all__ must be a list") diff --git a/scripts/benchmark_diarization.py b/scripts/benchmark_diarization.py index a6324535..5c074097 100755 --- a/scripts/benchmark_diarization.py +++ b/scripts/benchmark_diarization.py @@ -3,7 +3,10 @@ import argparse import os -import resource +try: + import resource +except ModuleNotFoundError: # pragma: no cover - Windows fallback + resource = None import shutil import subprocess import sys @@ -49,6 +52,8 @@ def _extract_wav_16k_mono(input_path: Path, output_path: Path) -> None: def _get_peak_rss_mb() -> float: + if resource is None: + return 0.0 # On Linux, ru_maxrss is in KiB. return float(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / 1024.0 diff --git a/scripts/quality/assert_coverage_100.py b/scripts/quality/assert_coverage_100.py index 965faf02..8b8509b4 100644 --- a/scripts/quality/assert_coverage_100.py +++ b/scripts/quality/assert_coverage_100.py @@ -1,13 +1,17 @@ #!/usr/bin/env python3 from __future__ import annotations +import ast import argparse +import fnmatch import json -import re +import subprocess import sys -from dataclasses import dataclass +import xml.etree.ElementTree as ET +from dataclasses import dataclass, field from datetime import datetime, timezone from pathlib import Path +from typing import Iterable @dataclass @@ -16,6 +20,7 @@ class CoverageStats: path: str covered: int total: int + file_stats: dict[str, tuple[int, int]] = field(default_factory=dict) @property def percent(self) -> float: @@ -24,66 +29,348 @@ def percent(self) -> float: return (self.covered / self.total) * 100.0 -_PAIR_RE = re.compile(r"^(?P[^=]+)=(?P.+)$") -_XML_LINES_VALID_RE = re.compile(r'lines-valid="([0-9]+(?:\\.[0-9]+)?)"') -_XML_LINES_COVERED_RE = re.compile(r'lines-covered="([0-9]+(?:\\.[0-9]+)?)"') -_XML_LINE_HITS_RE = re.compile(r"]*\\bhits=\"([0-9]+(?:\\.[0-9]+)?)\"") +TARGET_RULES = ( + { + "root": "apps/api/app", + "ext": 
{".py"}, + "exclude": {"**/__pycache__/**", "**/tests/**", "**/test_*.py", "**/*_test.py"}, + }, + { + "root": "services/worker", + "ext": {".py"}, + "exclude": {"**/__pycache__/**", "**/tests/**", "**/test_*.py", "**/*_test.py"}, + }, + { + "root": "packages/media-core/src/media_core", + "ext": {".py"}, + "exclude": {"**/__pycache__/**", "**/tests/**", "**/test_*.py", "**/*_test.py"}, + }, + { + "root": "scripts", + "ext": {".py"}, + "exclude": {"**/__pycache__/**", "**/tests/**", "**/test_*.py", "**/*_test.py"}, + }, + { + "root": "apps/web/src", + "ext": {".ts", ".tsx"}, + "exclude": { + "apps/web/src/*.test.ts", + "apps/web/src/*.test.tsx", + "apps/web/src/**/*.test.ts", + "apps/web/src/**/*.test.tsx", + "apps/web/src/**/__tests__/**", + "apps/web/src/test/**", + }, + }, + { + "root": "apps/desktop/src", + "ext": {".ts"}, + "exclude": { + "apps/desktop/src/*.test.ts", + "apps/desktop/src/**/*.test.ts", + "apps/desktop/src/**/__tests__/**", + }, + }, + { + "root": "apps/desktop/src-tauri/src", + "ext": {".rs"}, + "exclude": set(), + }, +) def _parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Assert 100% coverage for all declared components.") + parser = argparse.ArgumentParser(description="Assert strict 100% coverage for tracked first-party code.") parser.add_argument("--xml", action="append", default=[], help="Coverage XML input: name=path") parser.add_argument("--lcov", action="append", default=[], help="LCOV input: name=path") parser.add_argument("--out-json", default="coverage-100/coverage.json", help="Output JSON path") parser.add_argument("--out-md", default="coverage-100/coverage.md", help="Output markdown path") + parser.add_argument( + "--inventory-root", + default=".", + help="Workspace root used to collect tracked-code inventory (default: current directory)", + ) + parser.add_argument( + "--no-inventory-check", + action="store_true", + help="Disable tracked-code inventory validation", + ) return parser.parse_args() 
+def _resolve_relative_candidate(relative_root: Path, candidate: Path) -> Path: + # LCOV often writes SF paths like "src/App.tsx" while report file is at apps/web/coverage/lcov.info. + primary = (relative_root / candidate).resolve(strict=False) + if primary.exists(): + return primary + + if relative_root.name.lower() == "coverage": + parent_candidate = (relative_root.parent / candidate).resolve(strict=False) + if parent_candidate.exists() or candidate.parts[:1] == ("src",): + return parent_candidate + + return primary + + +def _normalize_path(raw: str, *, base: Path | None = None, relative_root: Path | None = None) -> str: + text = (raw or "").strip().replace("\\", "/") + if text: + candidate = Path(text) + resolved: Path | None = None + if candidate.is_absolute(): + resolved = candidate.resolve(strict=False) + elif relative_root is not None: + resolved = _resolve_relative_candidate(relative_root, candidate) + + if base is not None and resolved is not None: + try: + text = str(resolved.relative_to(base.resolve())).replace("\\", "/") + except Exception: + text = str(resolved).replace("\\", "/") + elif resolved is not None: + text = str(resolved).replace("\\", "/") + + while text.startswith("./"): + text = text[2:] + return text + + def parse_named_path(value: str) -> tuple[str, Path]: - match = _PAIR_RE.match(value.strip()) - if not match: + if "=" not in value: raise ValueError(f"Invalid input '{value}'. Expected format: name=path") - return match.group("name").strip(), Path(match.group("path").strip()) + name, path = value.split("=", 1) + name = name.strip() + path = Path(path.strip()) + if not name or not path: + raise ValueError(f"Invalid input '{value}'. 
Expected format: name=path") + return name, path -def parse_coverage_xml(name: str, path: Path) -> CoverageStats: - text = path.read_text(encoding="utf-8") - lines_valid_match = _XML_LINES_VALID_RE.search(text) - lines_covered_match = _XML_LINES_COVERED_RE.search(text) +def _sum_file_stats(file_stats: dict[str, tuple[int, int]]) -> tuple[int, int]: + covered = sum(v[0] for v in file_stats.values()) + total = sum(v[1] for v in file_stats.values()) + return covered, total - if lines_valid_match and lines_covered_match: - total = int(float(lines_valid_match.group(1))) - covered = int(float(lines_covered_match.group(1))) - return CoverageStats(name=name, path=str(path), covered=covered, total=total) - total = 0 - covered = 0 - for hits_raw in _XML_LINE_HITS_RE.findall(text): - total += 1 - try: - if int(float(hits_raw)) > 0: - covered += 1 - except ValueError: - continue +def _xml_source_roots(root: ET.Element) -> list[str]: + sources: list[str] = [] + for source in root.findall(".//sources/source"): + raw = (source.text or "").strip() + if raw: + sources.append(raw) + return sources - return CoverageStats(name=name, path=str(path), covered=covered, total=total) +def _normalize_xml_filename(filename: str, *, source_roots: list[str], base: Path | None, xml_path: Path) -> str: + if Path(filename).is_absolute(): + return _normalize_path(filename, base=base) -def parse_lcov(name: str, path: Path) -> CoverageStats: - total = 0 - covered = 0 + if source_roots: + first_normalized: str | None = None + for src in source_roots: + candidate = (Path(src) / filename).resolve(strict=False) + normalized = _normalize_path(str(candidate), base=base) + if not first_normalized and normalized and normalized != filename: + first_normalized = normalized + if candidate.exists(): + return normalized + if first_normalized: + return first_normalized + + return _normalize_path(filename, base=base, relative_root=xml_path.parent) - for raw in path.read_text(encoding="utf-8").splitlines(): - line = 
raw.strip() - if line.startswith("LF:"): - total += int(line.split(":", 1)[1]) - elif line.startswith("LH:"): - covered += int(line.split(":", 1)[1]) - return CoverageStats(name=name, path=str(path), covered=covered, total=total) +def parse_coverage_xml(name: str, path: Path, *, base: Path | None = None) -> CoverageStats: + file_stats: dict[str, tuple[int, int]] = {} + tree = ET.parse(path) + root = tree.getroot() + source_roots = _xml_source_roots(root) -def evaluate(stats: list[CoverageStats]) -> tuple[str, list[str]]: + for cls in root.findall(".//class"): + filename = cls.attrib.get("filename") + if not filename: + continue + norm = _normalize_xml_filename(filename, source_roots=source_roots, base=base, xml_path=path) + total = 0 + covered = 0 + for line in cls.findall("./lines/line"): + hits_raw = line.attrib.get("hits", "0") + try: + hits = int(float(hits_raw)) + except ValueError: + hits = 0 + total += 1 + if hits > 0: + covered += 1 + if total > 0: + prev = file_stats.get(norm) + if prev: + file_stats[norm] = (prev[0] + covered, prev[1] + total) + else: + file_stats[norm] = (covered, total) + + covered, total = _sum_file_stats(file_stats) + if total == 0: + total = int(float(root.attrib.get("lines-valid", "0") or 0)) + covered = int(float(root.attrib.get("lines-covered", "0") or 0)) + + return CoverageStats(name=name, path=str(path), covered=covered, total=total, file_stats=file_stats) + + +def parse_lcov(name: str, path: Path, *, base: Path | None = None) -> CoverageStats: + file_stats: dict[str, tuple[int, int]] = {} + current_file: str | None = None + record_has_da = False + record_lf = 0 + record_lh = 0 + + if base is None: + base = Path.cwd() + + def finalize_record() -> None: + nonlocal current_file, record_has_da, record_lf, record_lh + if not current_file: + return + if not record_has_da and record_lf > 0: + file_stats[current_file] = (record_lh, record_lf) + + for raw in path.read_text(encoding="utf-8").splitlines(): + line = raw.strip() + if 
line.startswith("SF:"): + finalize_record() + current_file = _normalize_path(line.split(":", 1)[1], base=base, relative_root=path.parent) + file_stats.setdefault(current_file, (0, 0)) + record_has_da = False + record_lf = 0 + record_lh = 0 + elif line.startswith("DA:") and current_file: + record_has_da = True + try: + _, rest = line.split(":", 1) + _, hits_raw = rest.split(",", 1) + hits = int(float(hits_raw)) + except ValueError: + continue + c, t = file_stats[current_file] + t += 1 + if hits > 0: + c += 1 + file_stats[current_file] = (c, t) + elif line.startswith("LF:") and current_file: + try: + record_lf = int(float(line.split(":", 1)[1])) + except ValueError: + record_lf = 0 + elif line.startswith("LH:") and current_file: + try: + record_lh = int(float(line.split(":", 1)[1])) + except ValueError: + record_lh = 0 + elif line == "end_of_record": + finalize_record() + current_file = None + record_has_da = False + record_lf = 0 + record_lh = 0 + + finalize_record() + covered, total = _sum_file_stats(file_stats) + return CoverageStats(name=name, path=str(path), covered=covered, total=total, file_stats=file_stats) + + +def _load_git_tracked_files(root: Path) -> list[str]: + proc = subprocess.run( + ["git", "ls-files"], + cwd=root, + capture_output=True, + text=True, + check=True, + ) + return [_normalize_path(line) for line in proc.stdout.splitlines() if line.strip()] + + +def _is_excluded(path: str, patterns: Iterable[str]) -> bool: + normalized = path.replace("\\", "/") + for pattern in patterns: + if fnmatch.fnmatch(normalized, pattern): + return True + return False + + +def _has_trackable_lines(root: Path, relative_path: str) -> bool: + file_path = (root / relative_path).resolve(strict=False) + if not file_path.is_file(): + return False + try: + content = file_path.read_text(encoding="utf-8") + except UnicodeDecodeError: + return file_path.stat().st_size > 0 + + if file_path.suffix.lower() == ".py": + try: + module = ast.parse(content) + except SyntaxError: + 
return any(line.strip() for line in content.splitlines()) + + body = list(module.body) + if body and isinstance(body[0], ast.Expr) and isinstance(getattr(body[0], "value", None), ast.Constant): + if isinstance(body[0].value.value, str): + body = body[1:] + + for stmt in body: + if isinstance(stmt, ast.Assign): + names = [target.id for target in stmt.targets if isinstance(target, ast.Name)] + if names and all(name.startswith("__") and name.endswith("__") for name in names): + continue + if isinstance(stmt, ast.AnnAssign) and isinstance(stmt.target, ast.Name): + name = stmt.target.id + if name.startswith("__") and name.endswith("__"): + continue + return True + return False + + return any(line.strip() for line in content.splitlines()) + + +def _collect_expected_inventory(root: Path) -> set[str]: + tracked = _load_git_tracked_files(root) + expected: set[str] = set() + + for file_path in tracked: + p = Path(file_path) + suffix = p.suffix.lower() + for rule in TARGET_RULES: + rule_root = rule["root"] + if file_path == rule_root or file_path.startswith(f"{rule_root}/"): + if suffix not in rule["ext"]: + continue + if _is_excluded(file_path, rule["exclude"]): + continue + if not _has_trackable_lines(root, file_path): + continue + expected.add(file_path) + break + + return expected + + +def _find_coverage_for_file(path: str, combined_stats: dict[str, tuple[int, int]]) -> tuple[int, int] | None: + normalized_path = path.replace("\\", "/") + direct = combined_stats.get(normalized_path) + if direct: + return direct + + lower_path = normalized_path.lower() + suffix = "/" + lower_path + for key, value in combined_stats.items(): + candidate = key.replace("\\", "/").lower() + if candidate == lower_path or candidate.endswith(suffix): + return value + return None + + +def evaluate(stats: list[CoverageStats], *, expected_inventory: set[str] | None) -> tuple[str, list[str], dict[str, int]]: findings: list[str] = [] for item in stats: if item.percent < 100.0: @@ -92,12 +379,50 @@ def 
evaluate(stats: list[CoverageStats]) -> tuple[str, list[str]]: combined_total = sum(item.total for item in stats) combined_covered = sum(item.covered for item in stats) combined = 100.0 if combined_total <= 0 else (combined_covered / combined_total) * 100.0 - if combined < 100.0: findings.append(f"combined coverage below 100%: {combined:.2f}% ({combined_covered}/{combined_total})") + metrics = { + "expected_files": 0, + "missing_files": 0, + "uncovered_files": 0, + } + + if expected_inventory is not None: + combined_file_stats: dict[str, tuple[int, int]] = {} + for item in stats: + for path, (covered, total) in item.file_stats.items(): + prev = combined_file_stats.get(path) + if prev: + combined_file_stats[path] = (prev[0] + covered, prev[1] + total) + else: + combined_file_stats[path] = (covered, total) + + missing: list[str] = [] + uncovered: list[str] = [] + for path in sorted(expected_inventory): + cov = _find_coverage_for_file(path, combined_file_stats) + if cov is None: + missing.append(path) + continue + covered, total = cov + if total <= 0 or covered < total: + pct = 100.0 if total <= 0 else (covered / total) * 100.0 + uncovered.append(f"{path} ({covered}/{total}, {pct:.2f}%)") + + metrics["expected_files"] = len(expected_inventory) + metrics["missing_files"] = len(missing) + metrics["uncovered_files"] = len(uncovered) + + if missing: + findings.append(f"coverage inventory missing files: {len(missing)}") + findings.extend(f"missing: {p}" for p in missing) + if uncovered: + findings.append(f"coverage inventory uncovered files: {len(uncovered)}") + findings.extend(f"uncovered: {p}" for p in uncovered) + status = "pass" if not findings else "fail" - return status, findings + return status, findings, metrics def _render_md(payload: dict) -> str: @@ -118,7 +443,16 @@ def _render_md(payload: dict) -> str: if not payload.get("components"): lines.append("- None") - lines.extend(["", "## Findings"]) + inventory = payload.get("inventory_metrics") or {} + 
lines.extend([ + "", + "## Inventory", + f"- expected_files: `{inventory.get('expected_files', 0)}`", + f"- missing_files: `{inventory.get('missing_files', 0)}`", + f"- uncovered_files: `{inventory.get('uncovered_files', 0)}`", + "", + "## Findings", + ]) findings = payload.get("findings") or [] if findings: lines.extend(f"- {finding}" for finding in findings) @@ -143,19 +477,24 @@ def _safe_output_path(raw: str, fallback: str, base: Path | None = None) -> Path def main() -> int: args = _parse_args() + workspace_root = Path(args.inventory_root).resolve() stats: list[CoverageStats] = [] for item in args.xml: name, path = parse_named_path(item) - stats.append(parse_coverage_xml(name, path)) + stats.append(parse_coverage_xml(name, path, base=workspace_root)) for item in args.lcov: name, path = parse_named_path(item) - stats.append(parse_lcov(name, path)) + stats.append(parse_lcov(name, path, base=workspace_root)) if not stats: raise SystemExit("No coverage files were provided; pass --xml and/or --lcov inputs.") - status, findings = evaluate(stats) + expected_inventory = None + if not args.no_inventory_check: + expected_inventory = _collect_expected_inventory(workspace_root) + + status, findings, inventory_metrics = evaluate(stats, expected_inventory=expected_inventory) payload = { "status": status, "timestamp_utc": datetime.now(timezone.utc).isoformat(), @@ -166,15 +505,17 @@ def main() -> int: "covered": item.covered, "total": item.total, "percent": item.percent, + "files": len(item.file_stats), } for item in stats ], + "inventory_metrics": inventory_metrics, "findings": findings, } try: - out_json = _safe_output_path(args.out_json, "coverage-100/coverage.json") - out_md = _safe_output_path(args.out_md, "coverage-100/coverage.md") + out_json = _safe_output_path(args.out_json, "coverage-100/coverage.json", base=workspace_root) + out_md = _safe_output_path(args.out_md, "coverage-100/coverage.md", base=workspace_root) except ValueError as exc: print(str(exc), 
file=sys.stderr) return 1 diff --git a/scripts/quality/check_codacy_zero.py b/scripts/quality/check_codacy_zero.py index 4925642e..eef24161 100644 --- a/scripts/quality/check_codacy_zero.py +++ b/scripts/quality/check_codacy_zero.py @@ -84,6 +84,57 @@ def extract_total_open(payload: Any) -> int | None: return None +_ACTIONABLE_DELTA_TYPES = {"added", "new", "introduced", "open", "unresolved"} +_NON_ACTIONABLE_DELTA_TYPES = {"fixed", "resolved", "removed", "ignored"} + + +def _count_actionable_pr_items(items: list[dict[str, Any]]) -> int: + total = 0 + for item in items: + delta = str(item.get("deltaType") or "").strip().lower() + if not delta: + total += 1 + continue + if delta in _NON_ACTIONABLE_DELTA_TYPES: + continue + if delta in _ACTIONABLE_DELTA_TYPES: + total += 1 + continue + total += 1 + return total + + +def _fetch_codacy_pr_actionable_count(api_base: str, provider: str, owner: str, repo: str, pull_request: str, token: str) -> tuple[int, bool]: + pr_value = urllib.parse.quote(pull_request, safe="") + page = 1 + total_rows = 0 + actionable = 0 + analysis_pending = False + + while True: + query = urllib.parse.urlencode({"limit": "100", "page": str(page)}) + url = ( + f"{api_base}/api/v3/analysis/organizations/{provider}/" + f"{owner}/repositories/{repo}/pull-requests/{pr_value}/issues?{query}" + ) + payload = _request_json(url, token, method="GET") + if payload.get("analyzed") is False: + analysis_pending = True + + items = payload.get("data") + if not isinstance(items, list): + break + actionable += _count_actionable_pr_items(items) + total_rows += len(items) + + total = int((payload.get("pagination") or {}).get("total") or total_rows) + if total_rows >= total or not items: + break + page += 1 + + return actionable, analysis_pending + + def _render_md(payload: dict) -> str: scope = payload.get("scope", "repository") lines = [ @@ -153,22 +204,24 @@ def main() -> int: query = urllib.parse.urlencode({"limit": "1", "page": "1"}) try: if pull_request: - url = ( 
- f"{api_base}/api/v3/analysis/organizations/{provider}/" - f"{owner}/repositories/{repo}/pull-requests/{urllib.parse.quote(pull_request, safe='')}/issues?{query}" - ) - payload: dict[str, Any] = {} for _ in range(30): - payload = _request_json(url, token, method="GET") - if payload.get("analyzed") is False: + open_issues, analysis_pending = _fetch_codacy_pr_actionable_count( + api_base=api_base, + provider=provider, + owner=owner, + repo=repo, + pull_request=pull_request, + token=token, + ) + if analysis_pending: time.sleep(5) continue break - open_issues = int((payload.get("pagination") or {}).get("total") or 0) - if payload.get("analyzed") is False: - analysis_pending = True - if open_issues != 0: - findings.append(f"Codacy PR {pull_request} is not analyzed yet and currently reports {open_issues} open issues.") + + if analysis_pending and open_issues is not None and open_issues != 0: + findings.append( + f"Codacy PR {pull_request} is not analyzed yet and currently reports {open_issues} actionable open issues." 
+ ) else: url = ( f"{api_base}/api/v3/analysis/organizations/{provider}/" @@ -181,7 +234,7 @@ def main() -> int: if open_issues is not None and open_issues != 0: if pull_request: - findings.append(f"Codacy reports {open_issues} open issues on PR #{pull_request} (expected 0).") + findings.append(f"Codacy reports {open_issues} actionable open issues on PR #{pull_request} (expected 0).") else: findings.append(f"Codacy reports {open_issues} open issues (expected 0).") status = "pass" if not findings else "fail" diff --git a/scripts/quality/check_deepscan_zero.py b/scripts/quality/check_deepscan_zero.py index 40a61f50..4cfadf3e 100644 --- a/scripts/quality/check_deepscan_zero.py +++ b/scripts/quality/check_deepscan_zero.py @@ -54,6 +54,7 @@ def _render_md(payload: dict) -> str: f"- Status: `{payload['status']}`", f"- Repo: `{payload.get('repo') or 'n/a'}`", f"- SHA: `{payload.get('sha') or 'n/a'}`", + f"- Source: `{payload.get('source') or 'n/a'}`", f"- Check conclusion: `{payload.get('check_conclusion') or 'n/a'}`", f"- New issues: `{payload.get('new_issues')}`", f"- Fixed issues: `{payload.get('fixed_issues')}`", @@ -78,6 +79,40 @@ def extract_new_fixed_counts(summary: str) -> tuple[int | None, int | None]: return new_issues, fixed_issues +def _latest_deepscan_check_run(check_runs: Any) -> dict[str, Any] | None: + if not isinstance(check_runs, list): + return None + deep_runs = [item for item in check_runs if isinstance(item, dict) and str(item.get("name") or "") == "DeepScan"] + deep_runs.sort( + key=lambda item: ( + str(item.get("completed_at") or ""), + str(item.get("started_at") or ""), + int(item.get("id") or 0), + ), + reverse=True, + ) + return deep_runs[0] if deep_runs else None + + +def _latest_deepscan_status(statuses: Any) -> dict[str, Any] | None: + if not isinstance(statuses, list): + return None + deep_statuses = [ + item + for item in statuses + if isinstance(item, dict) and str(item.get("context") or "") == "DeepScan" + ] + deep_statuses.sort( + 
key=lambda item: ( + str(item.get("updated_at") or ""), + str(item.get("created_at") or ""), + int(item.get("id") or 0), + ), + reverse=True, + ) + return deep_statuses[0] if deep_statuses else None + + def _safe_output_path(raw: str, fallback: str, base: Path | None = None) -> Path: root = (base or Path.cwd()).resolve() candidate = Path((raw or "").strip() or fallback).expanduser() @@ -103,6 +138,7 @@ def main() -> int: details_url: str | None = None new_issues: int | None = None fixed_issues: int | None = None + source: str | None = None if not token: findings.append("GITHUB_TOKEN (or GH_TOKEN) is missing.") @@ -120,35 +156,40 @@ def main() -> int: api_base = normalize_https_url(GITHUB_API_BASE, allowed_hosts={"api.github.com"}).rstrip("/") try: - payload = _request_json(f"{api_base}/repos/{owner}/{repo}/commits/{sha_safe}/check-runs", token) - runs = payload.get("check_runs") - if not isinstance(runs, list): - findings.append("GitHub check-runs payload is missing check_runs list.") + check_payload = _request_json(f"{api_base}/repos/{owner}/{repo}/commits/{sha_safe}/check-runs", token) + latest_check_run = _latest_deepscan_check_run(check_payload.get("check_runs")) + + if latest_check_run is not None: + source = "check_run" + check_conclusion = str(latest_check_run.get("conclusion") or "") + details_url = str(latest_check_run.get("details_url") or "") or None + if check_conclusion != "success": + findings.append(f"DeepScan check conclusion is {check_conclusion or 'unknown'} (expected success).") + + output = latest_check_run.get("output") if isinstance(latest_check_run.get("output"), dict) else {} + summary = str(output.get("summary") or "") + new_issues, fixed_issues = extract_new_fixed_counts(summary) else: - deep_runs = [item for item in runs if isinstance(item, dict) and str(item.get("name") or "") == "DeepScan"] - deep_runs.sort( - key=lambda item: ( - str(item.get("completed_at") or ""), - str(item.get("started_at") or ""), - int(item.get("id") or 0), - ), 
- reverse=True, - ) - latest = deep_runs[0] if deep_runs else None - if latest is None: - findings.append("DeepScan check context is missing for this commit.") + status_payload = _request_json(f"{api_base}/repos/{owner}/{repo}/commits/{sha_safe}/status", token) + latest_status = _latest_deepscan_status(status_payload.get("statuses")) + if latest_status is None: + findings.append("DeepScan status context is missing for this commit.") else: - check_conclusion = str(latest.get("conclusion") or "") - details_url = str(latest.get("details_url") or "") or None - if check_conclusion != "success": - findings.append(f"DeepScan check conclusion is {check_conclusion or 'unknown'} (expected success).") - output = latest.get("output") if isinstance(latest.get("output"), dict) else {} - summary = str(output.get("summary") or "") + source = "status_context" + state = str(latest_status.get("state") or "") + check_conclusion = "success" if state == "success" else state + details_url = str(latest_status.get("target_url") or "") or None + if state != "success": + findings.append(f"DeepScan status is {state or 'unknown'} (expected success).") + + summary = str(latest_status.get("description") or "") new_issues, fixed_issues = extract_new_fixed_counts(summary) - if new_issues is None: - findings.append("DeepScan summary did not include a parseable 'new issues' count.") - elif new_issues != 0: - findings.append(f"DeepScan reports {new_issues} new issues (expected 0).") + + if new_issues is None: + findings.append("DeepScan summary did not include a parseable 'new issues' count.") + elif new_issues != 0: + findings.append(f"DeepScan reports {new_issues} new issues (expected 0).") + status = "pass" if not findings else "fail" except Exception as exc: # pragma: no cover - network/runtime surface findings.append(f"GitHub API request failed: {exc}") @@ -158,6 +199,7 @@ def main() -> int: "status": status, "repo": repo_slug, "sha": sha, + "source": source, "check_conclusion": 
check_conclusion, "details_url": details_url, "new_issues": new_issues, diff --git a/scripts/release_readiness_report.py b/scripts/release_readiness_report.py index 1a9478ba..c7aa9779 100644 --- a/scripts/release_readiness_report.py +++ b/scripts/release_readiness_report.py @@ -251,6 +251,8 @@ def main(argv: list[str]) -> int: "owner": PYANNOTE_BLOCKER_OWNER, "recheck_date": PYANNOTE_BLOCKER_RECHECK_DATE, } + out_json.parent.mkdir(parents=True, exist_ok=True) + out_md.parent.mkdir(parents=True, exist_ok=True) out_json.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") lines: list[str] = [] diff --git a/services/worker/test_worker_core_wave.py b/services/worker/test_worker_core_wave.py new file mode 100644 index 00000000..48e94e24 --- /dev/null +++ b/services/worker/test_worker_core_wave.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from datetime import datetime, timedelta, timezone +from pathlib import Path +from types import SimpleNamespace +from uuid import uuid4 + +import pytest + +from app.models import Job, MediaAsset, PublishConnection +from services.worker import worker + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def test_truthy_and_bool_coercion_helpers(monkeypatch): + monkeypatch.setenv("SAMPLE_FLAG", "true") + _expect(worker._env_truthy("SAMPLE_FLAG"), "Expected env truthy for true") + + monkeypatch.delenv("SAMPLE_FLAG", raising=False) + monkeypatch.setenv("REFRAME_SAMPLE_FLAG", "1") + _expect(worker._env_truthy("SAMPLE_FLAG"), "Expected REFRAME_ fallback env lookup") + + _expect(worker._coerce_bool(True), "Expected bool true coercion") + _expect(worker._coerce_bool(1), "Expected numeric true coercion") + _expect(not worker._coerce_bool("no"), "Expected string false coercion") + _expect(worker._coerce_bool_with_default(None, True), "Expected default bool fallback") + + +def test_retry_parsers_and_retention_helpers(monkeypatch): + 
monkeypatch.setenv("REFRAME_JOB_RETRY_MAX_ATTEMPTS", "bad") + monkeypatch.setenv("REFRAME_JOB_RETRY_BASE_DELAY_SECONDS", "bad") + _expect(worker._retry_max_attempts() == 2, "Expected retry attempts default for invalid value") + _expect(worker._retry_base_delay_seconds() == 1.0, "Expected retry base delay default for invalid value") + + monkeypatch.setenv("REFRAME_RETENTION_ENTERPRISE_DAYS", "120") + _expect(worker._retention_days_for_plan("enterprise") == 120, "Expected env override for retention days") + monkeypatch.setenv("REFRAME_RETENTION_ENTERPRISE_DAYS", "oops") + _expect(worker._retention_days_for_plan("enterprise") == 90, "Expected fallback retention for invalid override") + + now = datetime(2026, 3, 4, tzinfo=timezone.utc) + old = now - timedelta(days=200) + fresh = now - timedelta(days=1) + _expect(worker._is_older_than_retention(created_at=old, plan_code="free", now=now), "Expected old asset retention match") + _expect(not worker._is_older_than_retention(created_at=fresh, plan_code="free", now=now), "Expected fresh asset retention mismatch") + + +def test_color_http_and_publish_helpers(): + _expect(worker._hex_to_ass_color("#abc", default="x") == "&H00CCBBAA", "Expected 3-char hex conversion") + _expect(worker._hex_to_ass_color("zzzzzz", default="x") == "x", "Expected invalid hex default fallback") + _expect(worker._is_http_uri("https://example.com"), "Expected https URI detection") + _expect(not worker._is_http_uri("file:///tmp/x"), "Expected non-http URI rejection") + + connection = PublishConnection(provider="youtube", external_account_id="acct-1", account_label="Creator Name") + asset = MediaAsset(id=uuid4(), kind="video", uri="/media/x.mp4", mime_type="video/mp4") + + for provider in ("youtube", "tiktok", "instagram", "facebook"): + result = worker._publish_result_for_provider( + provider=provider, + connection=connection, + asset=asset, + payload={"title": "Demo"}, + ) + _expect(result["status"] == "published", f"Expected published status for 
{provider}") + _expect(bool(result["published_url"]), f"Expected published URL for {provider}") + + with pytest.raises(ValueError): + worker._publish_result_for_provider(provider="x", connection=connection, asset=asset, payload={}) + + +def test_job_related_asset_and_size_helpers(monkeypatch, tmp_path: Path): + output_id = uuid4() + clip_id = uuid4() + thumb_id = uuid4() + subtitle_id = uuid4() + styled_id = uuid4() + + job = Job( + id=uuid4(), + job_type="shorts", + status="completed", + output_asset_id=output_id, + payload={ + "clip_assets": [ + { + "asset_id": str(clip_id), + "thumbnail_asset_id": str(thumb_id), + "subtitle_asset_id": str(subtitle_id), + "styled_asset_id": str(styled_id), + "garbage": "x", + }, + "not-a-dict", + ] + }, + ) + ids = worker._job_related_asset_ids(job) + _expect(output_id in ids and clip_id in ids and thumb_id in ids and subtitle_id in ids and styled_id in ids, "Expected related asset id extraction") + + media_root = tmp_path / "media" + media_root.mkdir(parents=True, exist_ok=True) + file_rel = Path("tmp") / "out.bin" + full = media_root / file_rel + full.parent.mkdir(parents=True, exist_ok=True) + full.write_bytes(b"abcdef") + + monkeypatch.setattr(worker, "get_settings", lambda: SimpleNamespace(media_root=str(media_root))) + local_asset = MediaAsset(id=uuid4(), kind="binary", uri=f"/media/{file_rel.as_posix()}", mime_type="application/octet-stream") + _expect(worker._asset_size_bytes(local_asset) == 6, "Expected local asset size bytes") + + remote_asset = MediaAsset(id=uuid4(), kind="binary", uri="https://cdn.example.com/a.bin", mime_type="application/octet-stream") + _expect(worker._asset_size_bytes(remote_asset) == 0, "Expected remote URI size fallback") + + +def test_dispatch_pipeline_step_branches(monkeypatch): + calls: list[dict] = [] + + def fake_dispatch(task_name: str, args, queue: str): + calls.append({"task_name": task_name, "args": args, "queue": queue}) + return SimpleNamespace(id=f"id-{task_name}") + + 
monkeypatch.setattr(worker, "_dispatch_task", fake_dispatch) + + run = SimpleNamespace(id=uuid4(), input_asset_id=uuid4()) + job = SimpleNamespace(id=uuid4()) + + captions_id = worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="captions", + input_asset_id=uuid4(), + step_payload={"backend": "noop"}, + ) + _expect(captions_id.startswith("id-"), "Expected captions dispatch id") + + publish_id = worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="publish_youtube", + input_asset_id=uuid4(), + step_payload={"connection_id": str(uuid4()), "asset_id": str(uuid4())}, + ) + _expect(publish_id.startswith("id-"), "Expected publish dispatch id") + + _expect(any(call["task_name"] == "tasks.generate_captions" for call in calls), "Expected captions task dispatch") + _expect(any(call["task_name"] == "tasks.publish_asset" for call in calls), "Expected publish task dispatch") + + with pytest.raises(ValueError): + worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="captions", + input_asset_id=None, + step_payload={}, + ) + + with pytest.raises(ValueError): + worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="publish", + input_asset_id=uuid4(), + step_payload={"provider": "youtube"}, + ) + + +def test_download_remote_uri_to_tmp_paths(monkeypatch, tmp_path: Path): + monkeypatch.setattr(worker, "new_tmp_file", lambda _suffix: tmp_path / "download.bin") + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: True) + with pytest.raises(RuntimeError): + worker._download_remote_uri_to_tmp(uri="https://example.com/file.bin") + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: False) + with pytest.raises(ValueError): + worker._download_remote_uri_to_tmp(uri="file:///tmp/file.bin") \ No newline at end of file diff --git a/services/worker/test_worker_fetch_asset_remote.py b/services/worker/test_worker_fetch_asset_remote.py index 63ddd983..c71ec9c2 100644 --- a/services/worker/test_worker_fetch_asset_remote.py +++ 
b/services/worker/test_worker_fetch_asset_remote.py @@ -17,7 +17,7 @@ def test_fetch_asset_downloads_remote_http(monkeypatch, tmp_path: Path): media_root.mkdir(parents=True, exist_ok=True) db_path = tmp_path / "reframe-test.db" - db_url = f"sqlite:////{str(db_path).lstrip('/')}" + db_url = f"sqlite:///{db_path.as_posix()}" monkeypatch.setenv("DATABASE_URL", db_url) monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) diff --git a/services/worker/test_worker_groq_client.py b/services/worker/test_worker_groq_client.py new file mode 100644 index 00000000..46c1e779 --- /dev/null +++ b/services/worker/test_worker_groq_client.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +import json + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +class _Response: + def __init__(self, payload: dict): + self._payload = payload + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc, tb): + return False + + def read(self) -> bytes: + return json.dumps(self._payload).encode("utf-8") + + +def test_get_groq_chat_client_from_env_modes(monkeypatch): + from services.worker import groq_client + + monkeypatch.setenv("REFRAME_OFFLINE_MODE", "true") + monkeypatch.setenv("GROQ_API_KEY", "abc") + _expect(groq_client.get_groq_chat_client_from_env() is None, "Expected offline mode to disable Groq client") + + monkeypatch.setenv("REFRAME_OFFLINE_MODE", "false") + monkeypatch.delenv("GROQ_API_KEY", raising=False) + _expect(groq_client.get_groq_chat_client_from_env() is None, "Expected missing API key to disable Groq client") + + monkeypatch.setenv("GROQ_API_KEY", "abc") + monkeypatch.setenv("GROQ_BASE_URL", "https://example.groq/v1") + monkeypatch.setenv("GROQ_TIMEOUT_SECONDS", "not-a-number") + client = groq_client.get_groq_chat_client_from_env() + _expect(client is not None, "Expected client when API key exists") + _expect(client.base_url == "https://example.groq/v1", "Expected env base URL") + 
_expect(client.timeout_seconds == 30.0, "Expected timeout fallback on invalid env value") + + +def test_groq_chat_client_create_success_and_fallback_content(monkeypatch): + from services.worker.groq_client import GroqChatClient + + captured = {"url": None, "method": None, "auth": None} + + def fake_urlopen(req, timeout=0): + captured["url"] = req.full_url + captured["method"] = req.method + captured["auth"] = req.headers.get("Authorization") + _expect(timeout == 12.5, "Expected timeout to be forwarded") + return _Response({"choices": [{"message": {"content": "hola"}}]}) + + monkeypatch.setattr("services.worker.groq_client.urllib.request.urlopen", fake_urlopen) + + client = GroqChatClient(api_key="secret", timeout_seconds=12.5) + result = client.create(model="llama", messages=[{"role": "user", "content": "hi"}], max_tokens=42) + + _expect(captured["url"].endswith("/chat/completions"), "Expected chat completions endpoint") + _expect(captured["method"] == "POST", "Expected POST request") + _expect(captured["auth"] == "Bearer secret", "Expected bearer auth header") + _expect(result.choices[0].message.content == "hola", "Expected parsed Groq content") + + monkeypatch.setattr( + "services.worker.groq_client.urllib.request.urlopen", + lambda *_args, **_kwargs: _Response({"choices": []}), + ) + empty = client.create(model="llama", messages=[{"role": "user", "content": "hi"}]) + _expect(empty.choices[0].message.content == "", "Expected graceful fallback on malformed payload") + + +def test_groq_chat_client_create_refuses_offline_mode(monkeypatch): + from services.worker.groq_client import GroqChatClient + + monkeypatch.setenv("REFRAME_OFFLINE_MODE", "1") + client = GroqChatClient(api_key="secret") + try: + client.create(model="llama", messages=[{"role": "user", "content": "hi"}]) + raise AssertionError("Expected offline mode guard to raise") + except RuntimeError as exc: + _expect("REFRAME_OFFLINE_MODE" in str(exc), "Expected offline mode error message") diff --git 
a/services/worker/test_worker_helper_sweep.py b/services/worker/test_worker_helper_sweep.py new file mode 100644 index 00000000..0df1de5b --- /dev/null +++ b/services/worker/test_worker_helper_sweep.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace +from uuid import uuid4 + +import pytest + +from media_core.subtitles.builder import SubtitleLine +from media_core.transcribe.models import Word +from services.worker import worker + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def test_find_repo_root_and_rel_dir(tmp_path: Path): + repo = tmp_path / "repo" + marker = repo / "apps" / "api" + marker.mkdir(parents=True, exist_ok=True) + nested = repo / "services" / "worker" + nested.mkdir(parents=True, exist_ok=True) + file_path = nested / "worker.py" + file_path.write_text("x", encoding="utf-8") + + _expect(worker._find_repo_root(file_path) == repo, "Expected repo marker path to resolve") + + fallback = tmp_path / "plain" / "file.py" + fallback.parent.mkdir(parents=True, exist_ok=True) + fallback.write_text("x", encoding="utf-8") + _expect(worker._find_repo_root(fallback) == fallback.parent, "Expected parent fallback path") + + class _RemoteStorage: + pass + + remote_rel = worker._worker_rel_dir(storage=_RemoteStorage(), org_id=uuid4()) + _expect(remote_rel.split("/")[-1] == "tmp", "Expected remote worker tmp suffix") + local_rel = worker._worker_rel_dir(storage=worker.LocalStorageBackend(media_root=tmp_path), org_id=uuid4()) + _expect(local_rel == "tmp", "Expected local worker tmp path") + + +def test_dispatch_task_and_progress_helpers(monkeypatch): + monkeypatch.setattr(worker, "is_local_queue_mode", lambda: True) + monkeypatch.setattr(worker, "dispatch_local_task", lambda task_name, *args, queue: f"local-{task_name}-{queue}") + local = worker._dispatch_task("tasks.echo", ["a"], queue="cpu") + _expect(local.id.startswith("local-"), 
"Expected local queue task id") + + monkeypatch.setattr(worker, "is_local_queue_mode", lambda: False) + + class _CeleryResult: + id = "celery-task-id" + + monkeypatch.setattr(worker.celery_app, "send_task", lambda task_name, args, queue: _CeleryResult()) + remote = worker._dispatch_task("tasks.echo", ["a"], queue="cpu") + _expect(remote.id == "celery-task-id", "Expected celery task id") + + task = SimpleNamespace(update_state=lambda **_kwargs: None) + payload = worker._progress(task, "running", 0.5, phase="x") + _expect(payload["status"] == "running", "Expected running progress payload status") + _expect(payload["phase"] == "x", "Expected running progress payload phase") + + def _boom(**_kwargs): + raise RuntimeError("update failed") + + task_bad = SimpleNamespace(update_state=_boom) + payload_bad = worker._progress(task_bad, "running", 0.75) + _expect(payload_bad["progress"] == 0.75, "Expected progress fallback payload") + + +def test_retry_loop_and_job_asset_kwargs(monkeypatch): + calls: list[int] = [] + updates: list[dict] = [] + + monkeypatch.setenv("REFRAME_JOB_RETRY_MAX_ATTEMPTS", "3") + monkeypatch.setenv("REFRAME_JOB_RETRY_BASE_DELAY_SECONDS", "0") + monkeypatch.setattr(worker, "update_job", lambda job_id, payload: updates.append({"job_id": job_id, **payload})) + monkeypatch.setattr(worker.time, "sleep", lambda _delay: None) + + def _fn(): + calls.append(1) + if len(calls) == 1: + raise worker.subprocess.CalledProcessError(returncode=1, cmd=["ffmpeg"], stderr=b"first failure") + return "ok" + + _expect(worker._run_ffmpeg_with_retries(job_id="j1", step="render", fn=_fn) == "ok", "Expected retry helper success") + _expect(len(calls) == 2, "Expected exactly one retry before success") + _expect(bool(updates) and updates[0]["retry_step"] == "render", "Expected retry metadata update") + + monkeypatch.setattr(worker, "get_job_context", lambda _job_id: {"project_id": uuid4(), "org_id": None, "owner_user_id": uuid4()}) + kwargs = worker._job_asset_kwargs("job-1") + 
_expect("project_id" in kwargs and "owner_user_id" in kwargs and "org_id" not in kwargs, "Expected scoped job asset kwargs") + + +def test_publish_and_style_helpers(): + _expect(worker._publish_provider_from_step("publish_youtube", {}) == "youtube", "Expected provider from typed step") + _expect(worker._publish_provider_from_step("publish", {"provider": "instagram"}) == "instagram", "Expected provider from payload") + + with pytest.raises(ValueError): + worker._publish_provider_from_step("publish", {"provider": "bad"}) + with pytest.raises(ValueError): + worker._publish_provider_from_step("unknown", {}) + + default_style = worker._resolve_style_from_options(None) + _expect(bool(default_style["font"]), "Expected default style font") + + preset_style = worker._resolve_style_from_options({"style_preset": "clean white"}) + _expect(bool(preset_style["font"]), "Expected preset style font") + + explicit_style = worker._resolve_style_from_options({"style": {"font": "Inter"}}) + _expect(explicit_style == {"font": "Inter"}, "Expected explicit style override") + + +def test_slice_subtitle_lines_handles_overlap_and_fallback_words(): + lines = [ + SubtitleLine( + start=0.0, + end=4.0, + words=[Word(text="hello", start=0.0, end=1.0), Word(text="world", start=1.0, end=2.0)], + speaker="A", + ), + SubtitleLine( + start=4.0, + end=7.0, + words=[Word(text="clip", start=4.0, end=5.0)], + speaker="B", + ), + ] + + sliced = worker._slice_subtitle_lines(lines, start=1.0, end=5.5) + _expect(bool(sliced), "Expected sliced subtitle lines") + _expect(sliced[0].start == 0.0, "Expected clipped start alignment") + _expect(sliced[0].end <= 4.5, "Expected clipped end bound") + + # Fallback branch: malformed words but text preserved in synthetic word. 
+ bad_line = SubtitleLine(start=2.0, end=3.0, words=[], speaker="C") + bad_line.words = [SimpleNamespace(text="bad", start="x", end="y")] + sliced_bad = worker._slice_subtitle_lines([bad_line], start=1.0, end=4.0) + _expect(bool(sliced_bad) and bool(sliced_bad[0].words[0].text), "Expected fallback synthetic word text") diff --git a/services/worker/test_worker_local_queue_dispatch.py b/services/worker/test_worker_local_queue_dispatch.py new file mode 100644 index 00000000..b1008598 --- /dev/null +++ b/services/worker/test_worker_local_queue_dispatch.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def test_worker_dispatch_task_uses_local_queue(monkeypatch): + from services.worker import worker + + monkeypatch.setenv("REFRAME_LOCAL_QUEUE_MODE", "true") + monkeypatch.setattr(worker, "is_local_queue_mode", lambda: True) + monkeypatch.setattr(worker, "dispatch_local_task", lambda task_name, *args, queue=None: "local-step") + + result = worker._dispatch_task("tasks.generate_captions", args=["job", "asset", {}], queue="cpu") + + _expect(result.id == "local-step", "Expected local queue dispatch result id") diff --git a/services/worker/test_worker_publish_adapters.py b/services/worker/test_worker_publish_adapters.py index ca832c42..18369712 100644 --- a/services/worker/test_worker_publish_adapters.py +++ b/services/worker/test_worker_publish_adapters.py @@ -14,7 +14,7 @@ def test_publish_adapters_and_task_complete_for_all_supported_providers(monkeypa media_root.mkdir(parents=True, exist_ok=True) db_path = tmp_path / "reframe-test.db" - db_url = f"sqlite:////{str(db_path).lstrip('/')}" + db_url = f"sqlite:///{db_path.as_posix()}" monkeypatch.setenv("DATABASE_URL", db_url) monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) diff --git a/services/worker/test_worker_task_matrix.py b/services/worker/test_worker_task_matrix.py new file mode 100644 index 
00000000..25e4a961 --- /dev/null +++ b/services/worker/test_worker_task_matrix.py @@ -0,0 +1,193 @@ +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace +from uuid import uuid4 + +from media_core.transcribe.models import TranscriptionResult, Word + + +def _expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _words() -> list[Word]: + return [ + Word(text="hello", start=0.0, end=0.5), + Word(text="world", start=0.6, end=1.1), + ] + + +def test_transcribe_video_missing_asset_path_marks_failed(monkeypatch): + from services.worker import worker + + updates: list[dict] = [] + monkeypatch.setattr(worker, "update_job", lambda _job_id, **kwargs: updates.append(kwargs)) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (None, None)) + + result = worker.transcribe_video.run(str(uuid4()), str(uuid4()), {"backend": "noop"}) + + _expect(result["status"] == "failed", "Expected missing source asset to fail task") + _expect(any(item.get("status") == worker.JobStatus.failed for item in updates), "Expected failed job update") + + +def test_transcribe_video_success_with_backend_alias(monkeypatch, tmp_path: Path): + from services.worker import worker + + video = tmp_path / "video.mp4" + video.write_bytes(b"video") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (SimpleNamespace(id=uuid4()), video)) + monkeypatch.setattr(worker, "_transcribe_media", lambda *_args, **_kwargs: TranscriptionResult(words=_words())) + monkeypatch.setattr(worker, "create_asset", lambda **_kwargs: SimpleNamespace(id=uuid4())) + + result = worker.transcribe_video.run(str(uuid4()), str(uuid4()), {"backend": "whisper", "language": "en"}) + + _expect(result["status"] == 
"transcribed", "Expected transcribe task success") + _expect(result["backend"] == "faster_whisper", "Expected whisper alias to map to faster_whisper") + _expect(result["word_count"] == 2, "Expected generated word count") + + +def test_generate_captions_handles_invalid_profile_and_backend(monkeypatch, tmp_path: Path): + from services.worker import worker + + video = tmp_path / "video.mp4" + video.write_bytes(b"video") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (SimpleNamespace(id=uuid4()), video)) + # Force fallback path from empty transcription -> noop words. + monkeypatch.setattr(worker, "_transcribe_media", lambda *_args, **_kwargs: TranscriptionResult(words=[])) + monkeypatch.setattr(worker, "transcribe_noop", lambda *_args, **_kwargs: TranscriptionResult(words=_words())) + monkeypatch.setattr(worker, "create_asset", lambda **_kwargs: SimpleNamespace(id=uuid4())) + + result = worker.generate_captions.run(str(uuid4()), + str(uuid4()), + { + "backend": "unknown-backend", + "formats": ["ass"], + "subtitle_quality_profile": "nonexistent-profile", + "diarize": True, + "diarization_backend": "unknown-backend", + }, + ) + + _expect(result["status"] == "captions_generated", "Expected caption task success") + _expect(result["transcription_backend"] == "noop", "Expected unknown backend fallback to noop") + _expect(result["subtitle_quality_profile"] == "balanced", "Expected unknown quality profile fallback") + _expect(any("Unknown backend" in item for item in result["warnings"]), "Expected backend warning") + _expect(any("Unknown subtitle_quality_profile" in item for item in result["warnings"]), "Expected profile warning") + + +def test_generate_captions_missing_asset_fails(monkeypatch): + from services.worker import worker + + updates: list[dict] = [] + monkeypatch.setattr(worker, "update_job", lambda 
_job_id, **kwargs: updates.append(kwargs)) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (None, None)) + + result = worker.generate_captions.run(str(uuid4()), str(uuid4()), {"backend": "noop"}) + + _expect(result["status"] == "failed", "Expected missing video failure") + _expect(any(item.get("status") == worker.JobStatus.failed for item in updates), "Expected failed job update") + + +def test_translate_subtitles_fails_on_missing_target_language(monkeypatch, tmp_path: Path): + from services.worker import worker + + subtitle = tmp_path / "captions.srt" + subtitle.write_text( + "1\n00:00:00,000 --> 00:00:01,000\nhello world\n", + encoding="utf-8", + ) + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (SimpleNamespace(id=uuid4()), subtitle)) + + result = worker.translate_subtitles.run(str(uuid4()), str(uuid4()), {"source_language": "en"}) + + _expect(result["status"] == "failed", "Expected missing target_language failure") + _expect("target_language" in result["error"], "Expected missing target_language error message") + + +def test_translate_subtitles_success_with_noop_translator(monkeypatch, tmp_path: Path): + from services.worker import worker + + subtitle = tmp_path / "captions.srt" + subtitle.write_text( + "1\n00:00:00,000 --> 00:00:01,000\nhello world\n", + encoding="utf-8", + ) + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (SimpleNamespace(id=uuid4()), subtitle)) + monkeypatch.setattr(worker, "create_asset", lambda **_kwargs: SimpleNamespace(id=uuid4())) + + result = worker.translate_subtitles.run(str(uuid4()), + str(uuid4()), + {"target_language": "es", 
"source_language": "en", "translator_backend": "noop", "bilingual": True}, + ) + + _expect(result["status"] == "translated", "Expected subtitle translation success") + _expect(result["target_language"] == "es", "Expected target language in payload") + _expect(result["bilingual"] is True, "Expected bilingual output flag") + + +def test_translate_subtitles_rejects_unsupported_extension(monkeypatch, tmp_path: Path): + from services.worker import worker + + subtitle = tmp_path / "captions.txt" + subtitle.write_text("not-srt", encoding="utf-8") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (SimpleNamespace(id=uuid4()), subtitle)) + + result = worker.translate_subtitles.run(str(uuid4()), str(uuid4()), {"target_language": "es"}) + + _expect(result["status"] == "failed", "Expected unsupported extension failure") + _expect("Only .srt/.vtt subtitles are supported" in result["error"], "Expected unsupported extension message") + + +def test_dispatch_pipeline_step_publish_paths_and_validation(monkeypatch): + from services.worker import worker + + run = SimpleNamespace(id=uuid4(), input_asset_id=uuid4()) + job = SimpleNamespace(id=uuid4()) + calls: list[dict] = [] + monkeypatch.setattr( + worker, + "_dispatch_task", + lambda task_name, args, queue: calls.append({"task_name": task_name, "args": args, "queue": queue}) or SimpleNamespace(id="task-1"), + ) + + task_id = worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="publish_youtube", + input_asset_id=uuid4(), + step_payload={"connection_id": str(uuid4()), "asset_id": str(uuid4())}, + ) + _expect(task_id == "task-1", "Expected publish step to dispatch task") + _expect(calls[-1]["task_name"] == "tasks.publish_asset", "Expected publish task dispatch") + + try: + worker._dispatch_pipeline_step( + job=job, + run=run, + step_type="publish", + 
input_asset_id=uuid4(), + step_payload={"provider": "youtube"}, + ) + raise AssertionError("Expected validation error when connection_id is missing") + except ValueError: + pass + diff --git a/services/worker/test_worker_tasks_extended_matrix.py b/services/worker/test_worker_tasks_extended_matrix.py new file mode 100644 index 00000000..eaec3caf --- /dev/null +++ b/services/worker/test_worker_tasks_extended_matrix.py @@ -0,0 +1,363 @@ +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +from types import SimpleNamespace +from uuid import UUID, uuid4 + +import pytest +from sqlmodel import Session + +from app.models import ( + Job, + JobStatus, + MediaAsset, + Organization, + PublishConnection, + PublishJob, + User, + WorkflowRun, + WorkflowRunStatus, + WorkflowRunStep, + WorkflowTemplate, +) +from media_core.segment.shorts import SegmentCandidate +from services.worker import worker + + +class _TaskSelf: + def __init__(self, request_id: str | None = "task-1"): + self.request = SimpleNamespace(id=request_id) + self.states: list[dict] = [] + + def update_state(self, **kwargs): + self.states.append(kwargs) + + +@pytest.fixture() +def worker_db(monkeypatch, tmp_path: Path): + from app.config import get_settings + from app.database import create_db_and_tables, get_engine + + media_root = tmp_path / "media" + media_root.mkdir(parents=True, exist_ok=True) + + db_path = tmp_path / "worker-test.db" + db_url = f"sqlite:///{db_path.as_posix()}" + monkeypatch.setenv("DATABASE_URL", db_url) + monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) + + get_settings.cache_clear() + get_engine.cache_clear() + worker._engine = None + worker._media_tmp = None + create_db_and_tables() + + return get_engine + + +def test_run_workflow_pipeline_invalid_missing_cancelled(worker_db): + assert worker.run_workflow_pipeline.run("not-a-uuid")["status"] == "invalid_run_id" + + missing = worker.run_workflow_pipeline.run(str(uuid4())) + assert 
missing["status"] == "missing" + + get_engine = worker_db + with Session(get_engine()) as session: + user = User(email="wf-owner@test.dev") + session.add(user) + session.commit() + session.refresh(user) + + org = Organization(name="WF Org", slug="wf-org") + session.add(org) + session.commit() + session.refresh(org) + + template = WorkflowTemplate( + name="wf", + steps=[{"type": "captions", "payload": {}}], + org_id=org.id, + owner_user_id=user.id, + ) + session.add(template) + session.commit() + session.refresh(template) + + run = WorkflowRun( + template_id=template.id, + org_id=org.id, + owner_user_id=user.id, + input_asset_id=None, + status=WorkflowRunStatus.cancelled, + ) + session.add(run) + session.commit() + session.refresh(run) + + cancelled = worker.run_workflow_pipeline.run(str(run.id)) + assert cancelled["status"] == "cancelled" + + +def test_run_workflow_pipeline_template_missing_and_dispatch_failure(worker_db, monkeypatch): + get_engine = worker_db + with Session(get_engine()) as session: + user = User(email="wf2-owner@test.dev") + session.add(user) + session.commit() + session.refresh(user) + + org = Organization(name="WF2 Org", slug="wf2-org") + session.add(org) + session.commit() + session.refresh(org) + + missing_template_run = WorkflowRun( + template_id=uuid4(), + org_id=org.id, + owner_user_id=user.id, + input_asset_id=None, + ) + session.add(missing_template_run) + session.commit() + session.refresh(missing_template_run) + + failed = worker.run_workflow_pipeline.run(str(missing_template_run.id)) + assert failed["status"] == "failed" + assert failed["error"] == "template_missing" + + template = WorkflowTemplate( + name="wf-dispatch", + steps=[{"type": "captions", "payload": {}}], + org_id=org.id, + owner_user_id=user.id, + ) + session.add(template) + session.commit() + session.refresh(template) + + run = WorkflowRun( + template_id=template.id, + org_id=org.id, + owner_user_id=user.id, + input_asset_id=None, + ) + session.add(run) + 
session.commit() + session.refresh(run) + + step = WorkflowRunStep(run_id=run.id, order_index=0, step_type="captions", payload={}) + session.add(step) + session.commit() + run_id = str(run.id) + + monkeypatch.setattr(worker, "_dispatch_pipeline_step", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("dispatch boom"))) + + dispatched = worker.run_workflow_pipeline.run(run_id) + assert dispatched["status"] == "failed" + assert "dispatch boom" in dispatched["error"] + + +def test_publish_asset_invalid_inputs_and_missing_records(worker_db): + assert worker.publish_asset.run(None, None, None, None, None, {})["status"] == "failed" + + invalid = worker.publish_asset.run(None, "youtube", "not-uuid", "bad", None, {}) + assert invalid["error"] == "connection_id and asset_id must be valid UUIDs" + + missing_connection = worker.publish_asset.run(None, "youtube", str(uuid4()), str(uuid4()), None, {}) + assert missing_connection["error"] == "publish_connection_missing" + + +def test_publish_asset_connection_revoked_and_asset_missing(worker_db): + get_engine = worker_db + with Session(get_engine()) as session: + user = User(email="publish-owner@test.dev") + session.add(user) + session.commit() + session.refresh(user) + + org = Organization(name="Publish Org", slug="publish-org") + session.add(org) + session.commit() + session.refresh(org) + + connection = PublishConnection( + org_id=org.id, + user_id=user.id, + provider="youtube", + account_label="acct", + external_account_id="acct-1", + revoked_at=datetime.now(timezone.utc), + ) + session.add(connection) + session.commit() + session.refresh(connection) + + asset = MediaAsset(kind="video", uri="/media/tmp/a.mp4", mime_type="video/mp4", org_id=org.id, owner_user_id=user.id) + session.add(asset) + session.commit() + session.refresh(asset) + + revoked_result = worker.publish_asset.run(None, "youtube", str(connection.id), str(asset.id), None, {}) + assert revoked_result["status"] == "failed" + assert revoked_result["error"] == 
"publish_connection_invalid" + + job = PublishJob( + org_id=org.id, + user_id=user.id, + provider="youtube", + connection_id=connection.id, + asset_id=uuid4(), + status="queued", + payload={}, + ) + session.add(job) + session.commit() + session.refresh(job) + + missing_asset = worker.publish_asset.run(str(job.id), None, None, None, None, {}) + assert missing_asset["status"] == "failed" + assert missing_asset["error"] == "publish_connection_invalid" + + +def test_render_styled_subtitles_failure_and_success_paths(monkeypatch, tmp_path: Path): + video = tmp_path / "video.mp4" + sub = tmp_path / "sub.srt" + output = tmp_path / "styled.mp4" + video.write_bytes(b"video") + sub.write_text("1\n00:00:00,000 --> 00:00:01,000\nhello\n", encoding="utf-8") + output.write_bytes(b"styled") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + + # Missing video + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (None, None)) + missing_video = worker.render_styled_subtitles.run(str(uuid4()), str(uuid4()), str(uuid4()), {}, {}) + assert missing_video["status"] == "failed" + + # Missing subtitle + calls = {"count": 0} + + def _fetch(asset_id: str): + calls["count"] += 1 + return (SimpleNamespace(id=UUID(asset_id), mime_type="video/mp4"), video if calls["count"] == 1 else None) + + monkeypatch.setattr(worker, "fetch_asset", _fetch) + missing_sub = worker.render_styled_subtitles.run(str(uuid4()), str(uuid4()), str(uuid4()), {}, {}) + assert missing_sub["status"] == "failed" + + # ffmpeg failure + vid_id = str(uuid4()) + sub_id = str(uuid4()) + + def _fetch_by_id(asset_id: str): + return (SimpleNamespace(id=UUID(asset_id), mime_type="video/mp4" if asset_id == vid_id else "text/plain"), video if asset_id == vid_id else sub) + + monkeypatch.setattr(worker, "fetch_asset", _fetch_by_id) + monkeypatch.setattr(worker, "_render_styled_subtitles_to_file", lambda **_kwargs: (_ for _ in 
()).throw(RuntimeError("render boom"))) + failed_render = worker.render_styled_subtitles.run(str(uuid4()), vid_id, sub_id, {}, {}) + assert failed_render["status"] == "failed" + + monkeypatch.setattr(worker, "_render_styled_subtitles_to_file", lambda **_kwargs: output) + monkeypatch.setattr(worker, "create_asset_for_existing_file", lambda **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/styled.mp4")) + success = worker.render_styled_subtitles.run(str(uuid4()), vid_id, sub_id, {}, {"preview_seconds": "7"}) + assert success["status"] == "styled_render" + + +def test_generate_shorts_failure_and_success_paths(monkeypatch, tmp_path: Path): + video = tmp_path / "video.mp4" + video.write_bytes(b"video") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + + # Missing source path + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (None, None)) + missing = worker.generate_shorts.run(str(uuid4()), str(uuid4()), {}) + assert missing["status"] == "failed" + + # Probe failure + monkeypatch.setattr(worker, "fetch_asset", lambda asset_id: (SimpleNamespace(id=UUID(asset_id), mime_type="video/mp4", uri="/media/tmp/video.mp4"), video)) + monkeypatch.setattr(worker, "probe_media", lambda _path: (_ for _ in ()).throw(RuntimeError("probe boom"))) + probe_failed = worker.generate_shorts.run(str(uuid4()), str(uuid4()), {}) + assert probe_failed["status"] == "failed" + + # Success path with mocked media operations + monkeypatch.setattr(worker, "probe_media", lambda _path: {"duration": 20.0}) + monkeypatch.setattr( + worker, + "equal_splits", + lambda _duration, clip_length=60.0: [ + SegmentCandidate(start=0.0, end=8.0, score=0.9, reason="a", snippet="a"), + SegmentCandidate(start=8.0, end=16.0, score=0.8, reason="b", snippet="b"), + ], + ) + monkeypatch.setattr(worker, "score_segments_heuristic", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "select_top", 
lambda candidates, **_kwargs: candidates) + monkeypatch.setattr(worker, "_run_ffmpeg_with_retries", lambda **_kwargs: None) + monkeypatch.setattr(worker, "new_tmp_file", lambda suffix: tmp_path / f"out{suffix}") + monkeypatch.setattr(worker, "cut_clip", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "create_asset_for_existing_file", lambda **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/clip.mp4")) + monkeypatch.setattr(worker, "create_thumbnail_asset", lambda *_args, **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/thumb.jpg")) + monkeypatch.setattr(worker, "create_asset", lambda **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/manifest.json")) + + done = worker.generate_shorts.run(str(uuid4()), str(uuid4()), {"max_clips": 2}) + assert done["status"] == "shorts_generated" + assert len(done["clip_assets"]) == 2 + + +def test_cut_merge_and_cleanup_paths(monkeypatch, tmp_path: Path, worker_db): + video = tmp_path / "video.mp4" + audio = tmp_path / "audio.wav" + video.write_bytes(b"video") + audio.write_bytes(b"audio") + + monkeypatch.setattr(worker, "update_job", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "_job_asset_kwargs", lambda _job_id: {}) + + # cut clip: missing source then success + monkeypatch.setattr(worker, "fetch_asset", lambda _asset_id: (None, None)) + cut_missing = worker.cut_clip_asset.run(str(uuid4()), str(uuid4()), 5, 1, {}) + assert cut_missing["status"] == "failed" + + monkeypatch.setattr(worker, "fetch_asset", lambda asset_id: (SimpleNamespace(id=UUID(asset_id), mime_type="video/mp4", uri="/media/tmp/video.mp4"), video)) + monkeypatch.setattr(worker, "_run_ffmpeg_with_retries", lambda **_kwargs: None) + monkeypatch.setattr(worker, "new_tmp_file", lambda suffix: tmp_path / f"cut{suffix}") + monkeypatch.setattr(worker, "cut_clip", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "create_asset_for_existing_file", lambda **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/cut.mp4")) 
+ monkeypatch.setattr(worker, "create_thumbnail_asset", lambda *_args, **_kwargs: SimpleNamespace(id=uuid4(), uri="/media/tmp/cut-thumb.jpg")) + cut_done = worker.cut_clip_asset.run(str(uuid4()), str(uuid4()), 8, 2, {}) + assert cut_done["duration"] == 6.0 + + # merge: missing audio then success + video_id = str(uuid4()) + audio_id = str(uuid4()) + + def _fetch_merge(asset_id: str): + if asset_id == video_id: + return (SimpleNamespace(id=UUID(asset_id), mime_type="video/mp4", uri="/media/tmp/video.mp4"), video) + return (SimpleNamespace(id=UUID(asset_id), mime_type="audio/wav", uri="/media/tmp/audio.wav"), audio) + + monkeypatch.setattr(worker, "fetch_asset", _fetch_merge) + monkeypatch.setattr(worker, "ffmpeg_merge_video_audio", lambda *_args, **_kwargs: None) + monkeypatch.setattr(worker, "new_tmp_file", lambda suffix: tmp_path / f"merge{suffix}") + merge_done = worker.merge_video_audio.run(str(uuid4()), video_id, audio_id, {"offset": 1.5}) + assert merge_done["status"] == "merged" + + # cleanup retention early path + get_engine = worker_db + with Session(get_engine()) as session: + org = Organization(name="cleanup-org", slug="cleanup-org") + user = User(email="cleanup@test.dev") + session.add(org) + session.add(user) + session.commit() + session.refresh(org) + session.refresh(user) + job = Job(job_type="captions", status=JobStatus.running, org_id=org.id, owner_user_id=user.id) + session.add(job) + session.commit() + + result = worker.cleanup_retention.run() + assert result["status"] == "ok" diff --git a/services/worker/test_worker_utils_extended.py b/services/worker/test_worker_utils_extended.py new file mode 100644 index 00000000..b9f8063b --- /dev/null +++ b/services/worker/test_worker_utils_extended.py @@ -0,0 +1,268 @@ +from __future__ import annotations + +import io +from datetime import datetime, timedelta, timezone +from pathlib import Path +from types import SimpleNamespace +from urllib.parse import urlparse +from uuid import uuid4 + + +def 
_expect(condition: bool, message: str) -> None: + if not condition: + raise AssertionError(message) + + +def _publish_host_matches(url: str, host: str) -> bool: + parsed = urlparse(url or "") + candidate = (parsed.hostname or "").lower() + return candidate == host or candidate.endswith(f".{host}") + + +def test_worker_bool_and_color_helpers(monkeypatch): + from services.worker import worker + + monkeypatch.setenv("REFRAME_TEST_FLAG", "true") + _expect(worker._env_truthy("TEST_FLAG") is True, "Expected _env_truthy to read prefixed env") + _expect(worker._truthy_env("TEST_FLAG") is True, "Expected _truthy_env wrapper behavior") + + _expect(worker._coerce_bool(True) is True, "Expected bool True") + _expect(worker._coerce_bool(0) is False, "Expected numeric false") + _expect(worker._coerce_bool("YES") is True, "Expected yes string to coerce true") + _expect(worker._coerce_bool({}) is False, "Expected unknown type false") + _expect(worker._coerce_bool_with_default(None, True) is True, "Expected default when None") + + _expect(worker._hex_to_ass_color("#ffcc00", default="x") == "&H0000CCFF", "Expected ASS BGR conversion") + _expect(worker._hex_to_ass_color("abc", default="x") == "&H00CCBBAA", "Expected 3-char hex expansion") + _expect(worker._hex_to_ass_color("bad*value", default="fallback") == "fallback", "Expected default on invalid") + + +def test_worker_retry_env_parsing(monkeypatch): + from services.worker import worker + + monkeypatch.setenv("REFRAME_JOB_RETRY_MAX_ATTEMPTS", "not-int") + monkeypatch.setenv("REFRAME_JOB_RETRY_BASE_DELAY_SECONDS", "not-float") + _expect(worker._retry_max_attempts() == 2, "Expected fallback max attempts") + _expect(worker._retry_base_delay_seconds() == 1.0, "Expected fallback base delay") + + monkeypatch.setenv("REFRAME_JOB_RETRY_MAX_ATTEMPTS", "0") + monkeypatch.setenv("REFRAME_JOB_RETRY_BASE_DELAY_SECONDS", "-2") + _expect(worker._retry_max_attempts() == 1, "Expected lower-bound max attempts") + 
_expect(worker._retry_base_delay_seconds() == 0.0, "Expected lower-bound delay") + + +def test_worker_download_remote_uri_to_tmp_paths(monkeypatch, tmp_path: Path): + from services.worker import worker + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: True) + try: + worker._download_remote_uri_to_tmp(uri="https://example.com/file.txt") + raise AssertionError("Expected offline mode guard failure") + except RuntimeError: + pass + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: False) + try: + worker._download_remote_uri_to_tmp(uri="file:///tmp/x") + raise AssertionError("Expected non-http URI failure") + except ValueError: + pass + + target = tmp_path / "downloaded.bin" + monkeypatch.setattr(worker, "new_tmp_file", lambda _suffix: target) + + class _Resp: + def __enter__(self): + self.buf = io.BytesIO(b"hello") + return self.buf + + def __exit__(self, exc_type, exc, tb): + return False + + monkeypatch.setattr(worker.urllib.request, "urlopen", lambda *_args, **_kwargs: _Resp()) + + out = worker._download_remote_uri_to_tmp(uri="https://example.com/file.bin") + _expect(out == target, "Expected downloaded file path") + _expect(out.read_bytes() == b"hello", "Expected downloaded bytes") + + empty_target = tmp_path / "empty.bin" + monkeypatch.setattr(worker, "new_tmp_file", lambda _suffix: empty_target) + + class _EmptyResp: + def __enter__(self): + self.buf = io.BytesIO(b"") + return self.buf + + def __exit__(self, exc_type, exc, tb): + return False + + monkeypatch.setattr(worker.urllib.request, "urlopen", lambda *_args, **_kwargs: _EmptyResp()) + try: + worker._download_remote_uri_to_tmp(uri="https://example.com/empty.bin") + raise AssertionError("Expected empty download to fail") + except RuntimeError: + pass + + +def _mock_transcribe_backends(monkeypatch, worker_module): + monkeypatch.setattr(worker_module, "transcribe_openai_file", lambda *_args, **_kwargs: "openai") + monkeypatch.setattr(worker_module, "transcribe_faster_whisper", lambda 
*_args, **_kwargs: "faster") + monkeypatch.setattr(worker_module, "transcribe_whisper_cpp", lambda *_args, **_kwargs: "cpp") + monkeypatch.setattr(worker_module, "transcribe_whisper_timestamped", lambda *_args, **_kwargs: "ts") + monkeypatch.setattr(worker_module, "transcribe_noop", lambda *_args, **_kwargs: "noop") + + +def test_worker_transcribe_media_routes_selected_backend(monkeypatch, tmp_path: Path): + from media_core.transcribe import TranscriptionBackend, TranscriptionConfig + from services.worker import worker + + media = tmp_path / "audio.wav" + media.write_bytes(b"data") + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: False) + _mock_transcribe_backends(monkeypatch, worker) + + warnings: list[str] = [] + expected_routes = [ + (TranscriptionBackend.FASTER_WHISPER, "faster"), + (TranscriptionBackend.WHISPER_CPP, "cpp"), + (TranscriptionBackend.WHISPER_TIMESTAMPED, "ts"), + (TranscriptionBackend.NOOP, "noop"), + (TranscriptionBackend.OPENAI_WHISPER, "openai"), + ] + for backend, expected in expected_routes: + got = worker._transcribe_media(media, TranscriptionConfig(backend=backend), warnings=warnings) + _expect(got == expected, f"Expected {backend.value} route") + + +def test_worker_transcribe_media_offline_openai_falls_back_to_noop(monkeypatch, tmp_path: Path): + from media_core.transcribe import TranscriptionBackend, TranscriptionConfig + from services.worker import worker + + media = tmp_path / "audio.wav" + media.write_bytes(b"data") + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: True) + _mock_transcribe_backends(monkeypatch, worker) + + warnings: list[str] = [] + got = worker._transcribe_media( + media, + TranscriptionConfig(backend=TranscriptionBackend.OPENAI_WHISPER), + warnings=warnings, + ) + _expect(got == "noop", "Expected offline OpenAI route to fall back to noop") + + +def test_worker_transcribe_media_backend_error_falls_back_with_warning(monkeypatch, tmp_path: Path): + from media_core.transcribe import 
TranscriptionBackend, TranscriptionConfig + from services.worker import worker + + media = tmp_path / "audio.wav" + media.write_bytes(b"data") + + monkeypatch.setattr(worker, "offline_mode_enabled", lambda: False) + _mock_transcribe_backends(monkeypatch, worker) + monkeypatch.setattr( + worker, + "transcribe_faster_whisper", + lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("boom")), + ) + + warnings: list[str] = [] + got = worker._transcribe_media( + media, + TranscriptionConfig(backend=TranscriptionBackend.FASTER_WHISPER), + warnings=warnings, + ) + _expect(got == "noop", "Expected backend error fallback to noop") + _expect(any("failed; falling back" in item for item in warnings), "Expected fallback warning") + +def test_worker_extract_audio_and_thumbnail_paths(monkeypatch, tmp_path: Path): + from services.worker import worker + + video = tmp_path / "in.mp4" + video.write_bytes(b"video") + audio = tmp_path / "out.wav" + + monkeypatch.setattr(worker.shutil, "which", lambda _name: None) + try: + worker._extract_audio_wav_for_diarization(video, audio) + raise AssertionError("Expected missing ffmpeg error") + except FileNotFoundError: + pass + + calls: list[list[str]] = [] + monkeypatch.setattr(worker.shutil, "which", lambda _name: "ffmpeg") + worker._extract_audio_wav_for_diarization(video, audio, runner=lambda cmd, **_kwargs: calls.append(cmd)) + _expect(calls and calls[0][-1] == str(audio), "Expected extraction command invocation") + + fallback_calls: list[dict] = [] + monkeypatch.setattr(worker, "create_asset", lambda **kwargs: fallback_calls.append(kwargs) or kwargs) + fallback = worker.create_thumbnail_asset(None) + _expect(fallback["kind"] == "image", "Expected fallback thumbnail asset for missing input") + + monkeypatch.setattr(worker.shutil, "which", lambda _name: "ffmpeg") + monkeypatch.setattr(worker, "get_media_tmp", lambda: tmp_path) + monkeypatch.setattr(worker, "uuid4", lambda: "test-thumb") + + def _runner_success(cmd, **_kwargs): + out = 
Path(cmd[-1]) + out.write_bytes(b"png") + + success = worker.create_thumbnail_asset(video, runner=_runner_success) + _expect(success.get("source_path") is not None, "Expected source-path thumbnail success") + + def _runner_fail(_cmd, **_kwargs): + raise worker.subprocess.CalledProcessError(returncode=1, cmd=["ffmpeg"]) + + failed = worker.create_thumbnail_asset(video, runner=_runner_fail) + _expect(failed.get("contents") is not None, "Expected fallback thumbnail on ffmpeg error") + + +def test_worker_retention_publish_and_asset_helpers(monkeypatch, tmp_path: Path): + from services.worker import worker + + monkeypatch.setenv("REFRAME_RETENTION_FREE_DAYS", "21") + _expect(worker._retention_days_for_plan("free") == 21, "Expected env override for retention") + monkeypatch.setenv("REFRAME_RETENTION_FREE_DAYS", "bad") + _expect(worker._retention_days_for_plan("free") == 14, "Expected fallback on invalid retention env") + + now = datetime.now(timezone.utc) + old = now - timedelta(days=45) + _expect(worker._is_older_than_retention(created_at=old, plan_code="pro", now=now) is True, "Expected old asset beyond retention") + _expect(worker._is_older_than_retention(created_at=None, plan_code="pro", now=now) is False, "Expected None timestamp to be retained") + + clip_asset = str(uuid4()) + thumb_asset = str(uuid4()) + payload = {"clip_assets": [{"asset_id": clip_asset, "thumbnail_asset_id": thumb_asset, "subtitle_asset_id": "bad-id"}]} + job = SimpleNamespace(output_asset_id=uuid4(), payload=payload) + related = worker._job_related_asset_ids(job) + _expect(len(related) == 3, "Expected output+clip+thumbnail UUIDs") + + conn = SimpleNamespace(account_label="Creator Account", external_account_id="acct123") + asset = SimpleNamespace(id=uuid4()) + yt = worker._publish_result_for_provider(provider="youtube", connection=conn, asset=asset, payload={"title": "t"}) + tk = worker._publish_result_for_provider(provider="tiktok", connection=conn, asset=asset, payload={}) + ig = 
worker._publish_result_for_provider(provider="instagram", connection=conn, asset=asset, payload={}) + fb = worker._publish_result_for_provider(provider="facebook", connection=conn, asset=asset, payload={}) + _expect(_publish_host_matches(yt["published_url"], "youtube.com"), "Expected youtube URL") + _expect(_publish_host_matches(tk["published_url"], "tiktok.com"), "Expected tiktok URL") + _expect(_publish_host_matches(ig["published_url"], "instagram.com"), "Expected instagram URL") + _expect(_publish_host_matches(fb["published_url"], "facebook.com"), "Expected facebook URL") + + _expect(worker._publish_provider_from_step("publish_youtube", {}) == "youtube", "Expected provider from typed step") + _expect(worker._publish_provider_from_step("publish", {"provider": "facebook"}) == "facebook", "Expected provider from payload") + try: + worker._publish_provider_from_step("publish", {"provider": "unknown"}) + raise AssertionError("Expected unsupported provider failure") + except ValueError: + pass + + local_file = tmp_path / "asset.bin" + local_file.write_bytes(b"1234") + monkeypatch.setattr(worker, "is_remote_uri", lambda _uri: False) + monkeypatch.setattr(worker, "get_settings", lambda: SimpleNamespace(media_root=str(tmp_path))) + size = worker._asset_size_bytes(SimpleNamespace(uri=str(local_file.relative_to(tmp_path)))) + _expect(size == 4, "Expected local asset size bytes") + diff --git a/services/worker/test_worker_workflow_pipeline.py b/services/worker/test_worker_workflow_pipeline.py index b1a21743..9e6a7b7b 100644 --- a/services/worker/test_worker_workflow_pipeline.py +++ b/services/worker/test_worker_workflow_pipeline.py @@ -19,7 +19,7 @@ def test_run_workflow_pipeline_dispatches_child_jobs(monkeypatch, tmp_path: Path media_root.mkdir(parents=True, exist_ok=True) db_path = tmp_path / "reframe-test.db" - db_url = f"sqlite:////{str(db_path).lstrip('/')}" + db_url = f"sqlite:///{db_path.as_posix()}" monkeypatch.setenv("DATABASE_URL", db_url) 
monkeypatch.setenv("REFRAME_MEDIA_ROOT", str(media_root)) diff --git a/services/worker/worker.py b/services/worker/worker.py index 71911fd9..7d5c720f 100644 --- a/services/worker/worker.py +++ b/services/worker/worker.py @@ -12,7 +12,8 @@ import urllib.request from datetime import datetime, timedelta, timezone from pathlib import Path -from typing import Any, Callable, Optional, Tuple, TypeVar +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from types import SimpleNamespace from uuid import UUID, uuid4 from sqlmodel import Session, create_engine, select @@ -34,9 +35,10 @@ def _find_repo_root(start: Path) -> Path: if MEDIA_CORE_SRC.is_dir() and str(MEDIA_CORE_SRC) not in sys.path: sys.path.append(str(MEDIA_CORE_SRC)) -from app.config import get_settings -from app.billing import get_plan_policy -from app.models import ( +from app.config import get_settings # noqa: E402 +from app.local_queue import dispatch_task as dispatch_local_task, is_local_queue_mode # noqa: E402 +from app.billing import get_plan_policy # noqa: E402 +from app.models import ( # noqa: E402 AutomationRunEvent, Job, JobStatus, @@ -52,15 +54,15 @@ def _find_repo_root(start: Path) -> Path: WorkflowStepStatus, WorkflowTemplate, ) -from app.storage import LocalStorageBackend, get_storage, is_remote_uri -from celery import Celery -from kombu import Queue - -from media_core.segment.shorts import HeuristicWeights, equal_splits, score_segments_heuristic, score_segments_llm, select_top -from media_core.diarize import DiarizationBackend, DiarizationConfig, assign_speakers_to_lines, diarize_audio -from media_core.subtitles.builder import GroupingConfig, SubtitleLine, group_words, to_ass, to_ass_karaoke, to_srt, to_vtt -from media_core.subtitles.vtt import parse_vtt -from media_core.transcribe import ( +from app.storage import LocalStorageBackend, get_storage, is_remote_uri # noqa: E402 +from celery import Celery # noqa: E402 +from kombu import Queue # noqa: E402 + +from 
media_core.segment.shorts import HeuristicWeights, equal_splits, score_segments_heuristic, score_segments_llm, select_top # noqa: E402 +from media_core.diarize import DiarizationBackend, DiarizationConfig, assign_speakers_to_lines, diarize_audio # noqa: E402 +from media_core.subtitles.builder import GroupingConfig, SubtitleLine, group_words, to_ass, to_ass_karaoke, to_srt, to_vtt # noqa: E402 +from media_core.subtitles.vtt import parse_vtt # noqa: E402 +from media_core.transcribe import ( # noqa: E402 TranscriptionBackend, TranscriptionConfig, transcribe_faster_whisper, @@ -69,10 +71,10 @@ def _find_repo_root(start: Path) -> Path: transcribe_whisper_cpp, transcribe_whisper_timestamped, ) -from media_core.transcribe.models import Word -from media_core.translate.srt import parse_srt, translate_srt, translate_srt_bilingual -from media_core.translate.translator import CloudTranslator, LocalTranslator, NoOpTranslator -from media_core.video_edit.ffmpeg import cut_clip, detect_silence, merge_video_audio as ffmpeg_merge_video_audio, probe_media +from media_core.transcribe.models import Word # noqa: E402 +from media_core.translate.srt import parse_srt, translate_srt, translate_srt_bilingual # noqa: E402 +from media_core.translate.translator import CloudTranslator, LocalTranslator, NoOpTranslator # noqa: E402 +from media_core.video_edit.ffmpeg import cut_clip, detect_silence, merge_video_audio as ffmpeg_merge_video_audio, probe_media # noqa: E402 try: from .groq_client import get_groq_chat_client_from_env @@ -100,6 +102,16 @@ def _env_truthy(name: str) -> bool: Queue(GPU_QUEUE), ) + +TaskArg = Optional[Union[str, Dict[str, Any]]] + + +def _dispatch_task(task_name: str, args: List[TaskArg], queue: str) -> SimpleNamespace: + if is_local_queue_mode(): + task_id = dispatch_local_task(task_name, *args, queue=queue) + return SimpleNamespace(id=task_id) + return celery_app.send_task(task_name, args=args, queue=queue) + logger = logging.getLogger(__name__) _engine = None @@ -839,24 
+851,24 @@ def _dispatch_pipeline_step( raise ValueError(f"Workflow step `{step_type}` is missing input asset") if step_type == "captions": - result = celery_app.send_task("tasks.generate_captions", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) + result = _dispatch_task("tasks.generate_captions", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) return str(result.id) if step_type == "translate_subtitles": - result = celery_app.send_task("tasks.translate_subtitles", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) + result = _dispatch_task("tasks.translate_subtitles", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) return str(result.id) if step_type == "style_subtitles": video_asset_id = str(step_payload.get("video_asset_id") or run.input_asset_id or "") subtitle_asset_id = str(step_payload.get("subtitle_asset_id") or input_asset_id or "") style = step_payload.get("style") if isinstance(step_payload.get("style"), dict) else {} options = {"preview_seconds": step_payload.get("preview_seconds")} - result = celery_app.send_task( + result = _dispatch_task( "tasks.render_styled_subtitles", args=[str(job.id), video_asset_id, subtitle_asset_id, style, options], queue=CPU_QUEUE, ) return str(result.id) if step_type == "shorts": - result = celery_app.send_task("tasks.generate_shorts", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) + result = _dispatch_task("tasks.generate_shorts", args=[str(job.id), str(input_asset_id), step_payload], queue=CPU_QUEUE) return str(result.id) if step_type in {"publish", "publish_youtube", "publish_tiktok", "publish_instagram", "publish_facebook"}: provider = _publish_provider_from_step(step_type, step_payload) @@ -868,7 +880,7 @@ def _dispatch_pipeline_step( raise ValueError("Publish step requires an asset_id or workflow input asset") task_payload = dict(step_payload) task_payload.setdefault("source_workflow_job_id", 
str(job.id)) - result = celery_app.send_task( + result = _dispatch_task( "tasks.publish_asset", args=[None, provider, connection_id, publish_asset_id, str(run.id), task_payload], queue=CPU_QUEUE, @@ -2280,7 +2292,8 @@ def cleanup_retention(self) -> dict: "cleaned_assets": cleaned_assets, "timestamp": now.isoformat(), } - _progress(self, "completed", 1.0, **result) + progress_meta = {k: v for k, v in result.items() if k != "status"} + _progress(self, "completed", 1.0, **progress_meta) return result