From 163cb62f31fb7a7434dd50f623724606157b82c3 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Mon, 17 Nov 2025 07:34:38 -0500 Subject: [PATCH 01/65] [DAPS-1770] - release: v4.0.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [DAPS-1585] - update: dependencies, upgrade ssl dependency. 3.2.5 (#1646) * [DAPS-1605] - fix: scripts, install_foxx.sh by splitting ssl_args (#1623) * [DAPS-1651] - refactor: scripts, compose, unify treatment of env variables in compose env generator (#1656) (#1658) * [DAPS-1675] - feature: foxx, adding the logger functions for future PR's (#1675) * [DAPS-1659] - refactor: scripts, remove dependencies install scripts (#1660) * [DAPS-1670] - feature: common, core, repo, python_client, web, allow passing repo types in protobuf messages (#1670) * [DAPS-1671] - feature: foxx, add repository and execution strategy types (#1672) * [DAPS-1661] - refactor: compose, scripts, remove remaining occurrences of zeromq system secret. (#1661) * [DAPS-1522] - refactor: foxx, user router logging improvements, remove non helpful logs from tasks.js (#1629) * [DAPS-1691] - refactor: foxx, adjust validation.js swap g_lib with error_code require rem… (#1691) * [DAPS-1692] - tests: ci, End-to-end web tests, fix flaky test (#1693) * [DAPS-1694] - refactor: foxx, move permissions functions from support.js to lib/permissions (#1695) * [DAPS-1685] - feature: compose, enable arangodb ssl (#1687) * [DAPS-1700] - fix: ci, limit arangodb job output to last 3 hours. 
(#1701) * [DAPS-1676] - feature: foxx, arango add factory for repositories for metadata and globus (#1697) * [DAPS-1718] - feature: web, core, python client, Protobuf ExecutionMethod enum, add RepoAllocationCreateResponse (#1719) * [DAPS-1713] - refactor: core, web, python client, protobuf, allow optional fields when creating repo to support metadat… (#1714) * [DAPS-1715] - refactor: core, make path, pub_key, address, endpoint optional in repoCreateRequest (#1716) * [DAPS-1705] - feature: foxx, integrate metadata globus factory repo router create (#1706) * [DAPS-1688] - update: dependencies, core, repo, authz, gcs, Crypto libssl switched to version 3 globus_sdk version pinned (#1689) * [DAPS-1729] - fix: ci, downstream datafed dependencies pipelines are building the container image from incorrect sha (#1732) * [DAPS-1711] - refactor: foxx standardize repo response schema (#1712) * [DAPS-1725] - refactor: remove confusing apache conf file. (#1728) * [DAPS-1707] - update: dependencies, web, update web dependencies before install (#1709) * [DAPS-1522] - refactor: foxx, task router logging improvements (#1648) * [DAPS-1522] - refactor: foxx, query router logging improvements (#1627) * [DAPS-1735] - bug: foxx, remove duplicate user_router test (#1736) * [DAPS-1731] - feature: scripts, compose, add scripts to generate globus credentials for web service (#1731) * [DAPS-1725] - refactor: tests, mock core server centralized (#1726) * [DAPS-1741] - update: scripts, native client id in initialize_globus_endpoint and globus_clea… (#1741) * [DAPS-1745] - fix: scripts, account for nested client credentials. (#1746) * [DAPS-1725-2] - fix: tests, centralized mock core service libraries fixed (part 2) (#1747) * [DAPS-1742] - refactor script replace os.path.join with urllib.parse.urljoin (#1744) * [DAPS-1749] - refactor: cmake, set cmake policy to silence noisy warning. 
(#1750) * [DAPS-1522] - refactor: foxx, feature tag router logging improvements (#1734) * [DAPS-1378] - fix: web, mapping of multiple globus accounts. (#1753) * [DAPS-1756] - fix: scripts, foxx, add retries and connection check to install_foxx.sh script. (#1757) * [DAPS-1522] - refactor: foxx, Version Router Logging Improvements (#1758) * [DAPS-1737] - refactor: compose, cleanup arango ssl env variables (#1765) * [DAPS-1766] - fix: ci, python client provisioning job * [DAPS-1663] - feature: core Service, adding Correlation ID to Logging (#1704) Co-authored-by: Aaron Perez Co-authored-by: AronPerez Co-authored-by: Blake Nedved Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com> Co-authored-by: nedvedba <145805866+nedvedba@users.noreply.github.com> Co-authored-by: Austin Hampton Co-authored-by: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Co-authored-by: Blake Nedved Co-authored-by: Polina Shpilker --- .github/workflows/build-docs.yml | 2 +- .github/workflows/unit-tests.yml | 5 +- .gitignore | 5 +- .gitlab-ci.yml | 3 + .gitlab/build/build_core_image.yml | 90 +- .gitlab/build/build_foxx_image.yml | 96 +- .gitlab/build/build_gcs_base_image.yml | 131 ++- .gitlab/build/build_gcs_image.yml | 158 ++-- .gitlab/build/build_repo_image.yml | 95 +- .gitlab/build/build_ws_image.yml | 98 +- .gitlab/build/force_build_core_image.yml | 28 +- .../build/force_build_dependencies_image.yml | 71 ++ .gitlab/build/force_build_foxx_image.yml | 26 +- .gitlab/build/force_build_gcs_base_image.yml | 69 +- .gitlab/build/force_build_gcs_image.yml | 65 +- .gitlab/build/force_build_repo_image.yml | 28 +- .gitlab/build/force_build_ws_image.yml | 32 +- .../build/skip_build_dependencies_image.yml | 11 + .gitlab/common.yml | 8 +- .gitlab/end_to_end.yml | 13 +- .gitlab/stage_base_image_check.yml | 38 + .gitlab/stage_build.yml | 12 +- .gitlab/stage_build_base.yml | 21 +- .gitlab/stage_provision_client.yml | 13 +- .gitlab/stage_unit.yml | 2 - .gitmodules 
| 6 +- CMakeLists.txt | 44 +- cmake/Boost.cmake | 5 + cmake/Utils.cmake | 21 + cmake/Version.cmake | 28 +- common/proto/common/SDMS.proto | 9 +- common/proto/common/SDMS_Auth.proto | 23 +- common/tests/unit/test_Buffer.cpp | 64 ++ compose/README.md | 67 +- compose/all/compose.yml | 30 +- compose/all/generate_env.sh | 2 +- compose/all/generate_globus_files.sh | 4 +- compose/metadata/compose.yml | 23 +- compose/metadata/generate_env.sh | 2 +- compose/metadata/generate_globus_files.sh | 8 + compose/repo/compose.yml | 2 +- compose/repo/generate_globus_files.sh | 4 +- core/database/CMakeLists.txt | 45 +- core/database/foxx/api/acl_router.js | 53 +- core/database/foxx/api/admin_router.js | 5 +- core/database/foxx/api/authz.js | 26 +- core/database/foxx/api/authz_router.js | 52 +- core/database/foxx/api/coll_router.js | 119 ++- core/database/foxx/api/data_router.js | 108 +-- core/database/foxx/api/group_router.js | 36 +- core/database/foxx/api/lib/error_codes.js | 42 + core/database/foxx/api/lib/execution_types.js | 15 + core/database/foxx/api/lib/logger.js | 114 +++ core/database/foxx/api/lib/permissions.js | 653 ++++++++++++++ core/database/foxx/api/lib/result.js | 25 + core/database/foxx/api/lib/user_token.js | 5 +- .../foxx/api/models/globus_collection.js | 3 +- .../models/repositories/base_repository.js | 228 +++++ .../api/models/repositories/repositories.js | 130 +++ .../models/repositories/repository/globus.js | 275 ++++++ .../repositories/repository/metadata.js | 297 +++++++ .../foxx/api/models/repositories/types.js | 39 + .../api/models/repositories/validation.js | 231 +++++ core/database/foxx/api/note_router.js | 50 +- core/database/foxx/api/process.js | 62 +- core/database/foxx/api/proj_router.js | 36 +- core/database/foxx/api/query_router.js | 267 +++++- core/database/foxx/api/record.js | 15 +- core/database/foxx/api/repo.js | 9 +- core/database/foxx/api/repo_router.js | 151 ++-- core/database/foxx/api/schema_router.js | 67 +- 
core/database/foxx/api/schemas/repo.js | 24 + .../foxx/api/schemas/response_envelope.js | 31 + core/database/foxx/api/schemas/task.js | 27 + core/database/foxx/api/support.js | 841 ++---------------- core/database/foxx/api/tag_router.js | 110 ++- core/database/foxx/api/task_router.js | 249 +++++- core/database/foxx/api/tasks.js | 105 +-- core/database/foxx/api/topic_router.js | 5 +- core/database/foxx/api/user_router.js | 822 +++++++++++++++-- core/database/foxx/api/version_router.js.in | 29 + core/database/foxx/tests/authz.test.js | 16 +- core/database/foxx/tests/authz_router.test.js | 6 +- .../foxx/tests/base_repository.test.js | 101 +++ core/database/foxx/tests/query_router.test.js | 166 ++++ core/database/foxx/tests/record.test.js | 9 +- core/database/foxx/tests/repo.test.js | 5 +- core/database/foxx/tests/repo_globus.test.js | 498 +++++++++++ .../database/foxx/tests/repo_metadata.test.js | 651 ++++++++++++++ core/database/foxx/tests/repo_router.test.js | 256 ++++++ core/database/foxx/tests/repositories.test.js | 114 +++ core/database/foxx/tests/tag_router.test.js | 63 ++ core/database/foxx/tests/task_router.test.js | 154 ++++ core/database/foxx/tests/validation.test.js | 111 +++ .../foxx/tests/version_router.test.js | 53 ++ core/database/tests/test_fixture_setup.sh | 10 +- core/database/tests/test_foxx.sh | 10 +- core/database/tests/test_setup.sh | 12 +- core/database/tests/test_teardown.sh | 4 +- core/docker/Dockerfile | 51 +- core/server/DatabaseAPI.cpp | 204 +++-- core/server/DatabaseAPI.hpp | 2 +- core/server/tests/unit/CMakeLists.txt | 1 + core/server/tests/unit/test_DatabaseAPI.cpp | 215 +++++ .../source/admin/install_bare_metal.rst | 18 +- doc_source/source/admin/install_docker.rst | 48 +- doc_source/source/dev/design.rst | 72 +- doc_source/source/dev/project.rst | 18 +- doc_source/source/dev/release.rst | 5 + doc_source/source/dev/roadmap.rst | 74 +- doc_source/source/dev/testing.rst | 3 +- doc_source/source/system/getting_started.rst | 122 +-- 
doc_source/source/system/introduction.rst | 45 +- doc_source/source/system/overview.rst | 731 ++++++++------- docker/Dockerfile.dependencies | 70 -- docker/Dockerfile.foxx | 17 +- docker/entrypoint_foxx.sh | 13 +- docs/_sources/admin/general.rst.txt | 7 +- docs/admin/general.html | 7 +- external/DataFedDependencies | 1 + external/protobuf | 1 - python/datafed_pkg/requirements.txt | 6 - .../Dockerfile.python-client-base.ubuntu | 2 +- repository/docker/000-default.conf | 60 -- repository/docker/Dockerfile | 69 +- repository/docker/Dockerfile.gcs | 44 +- repository/docker/entrypoint_authz.sh | 10 +- .../gridftp/globus5/authz/CMakeLists.txt | 1 - .../globus5/authz/tests/CMakeLists.txt | 26 +- .../authz/tests/integration/CMakeLists.txt | 3 + .../tests/integration/test_gsi_authz_init.cpp | 11 +- .../globus5/authz/tests/mock/CMakeLists.txt | 20 - .../globus5/authz/tests/mock/mock_start.sh | 8 - scripts/compose_build_images.sh | 22 +- scripts/compose_cleanup_globus_files.sh | 6 +- scripts/compose_generate_env.sh | 252 ++++-- scripts/compose_generate_globus_files.sh | 4 +- ..._generate_web_server_globus_credentials.sh | 87 ++ scripts/dependency_install_functions.sh | 727 --------------- scripts/export_dependency_version.sh | 2 +- scripts/generate_datafed.sh | 2 + .../generate_metadata_container_scripts.sh | 2 - scripts/generate_repo_container_scripts.sh | 2 - scripts/generate_ws_config.sh | 8 +- .../globus/generate_web_server_credentials.py | 108 +++ scripts/globus/globus_cleanup.py | 2 +- scripts/globus/initialize_globus_endpoint.py | 4 +- scripts/globus/utils.py | 69 +- scripts/install_authz_dependencies.sh | 54 -- scripts/install_client_dependencies.sh | 32 - scripts/install_core_dependencies.sh | 54 -- scripts/install_dependencies.sh | 136 --- scripts/install_docker_dependencies.sh | 24 - scripts/install_docs_dependencies.sh | 53 -- .../install_end_to_end_test_dependencies.sh | 56 -- scripts/install_foxx.sh | 131 +-- scripts/install_gcs.sh | 10 - 
scripts/install_lego_and_certificates.sh | 2 +- scripts/install_python_client_dependencies.sh | 35 - scripts/install_repo_dependencies.sh | 55 -- scripts/install_ws.sh | 4 +- scripts/install_ws_dependencies.sh | 77 -- scripts/utils.sh | 126 --- tests/CMakeLists.txt | 55 ++ tests/end-to-end/setup.sh | 12 +- tests/end-to-end/web-UI/auth.setup.js.in | 90 +- tests/end-to-end/web-UI/playwright.config.js | 16 +- tests/mock/CMakeLists.txt | 24 + tests/mock/mock_start.sh | 10 + .../authz/tests => tests}/mock/mock_stop.sh | 3 + .../tests => tests}/mock/test_getVersion.cpp | 15 +- .../authz => tests}/mock_core/AuthMap.cpp | 0 .../authz => tests}/mock_core/AuthMap.hpp | 0 .../mock_core/AuthenticationManager.cpp | 0 .../mock_core/AuthenticationManager.hpp | 0 .../authz => tests}/mock_core/CMakeLists.txt | 0 .../mock_core/ClientWorker.cpp | 0 .../mock_core/ClientWorker.hpp | 0 .../authz => tests}/mock_core/Condition.cpp | 0 .../authz => tests}/mock_core/Condition.hpp | 0 .../authz => tests}/mock_core/Config.cpp | 0 .../authz => tests}/mock_core/Config.hpp | 0 .../mock_core/IMockCoreServer.hpp | 0 .../mock_core/MockCoreServer.cpp | 0 .../mock_core/MockCoreServer.hpp | 0 .../authz => tests}/mock_core/MockGlobals.cpp | 0 .../authz => tests}/mock_core/MockGlobals.hpp | 0 .../mock_core/PublicKeyTypes.hpp | 0 .../authz => tests}/mock_core/Version.hpp.in | 0 .../authz => tests}/mock_core/main.cpp | 0 web/datafed-ws.js | 14 +- web/docker/Dockerfile | 67 +- 192 files changed, 8962 insertions(+), 4577 deletions(-) create mode 100644 .gitlab/build/force_build_dependencies_image.yml create mode 100644 .gitlab/build/skip_build_dependencies_image.yml create mode 100644 .gitlab/stage_base_image_check.yml create mode 100755 compose/metadata/generate_globus_files.sh create mode 100644 core/database/foxx/api/lib/error_codes.js create mode 100644 core/database/foxx/api/lib/execution_types.js create mode 100644 core/database/foxx/api/lib/logger.js create mode 100644 
core/database/foxx/api/lib/permissions.js create mode 100644 core/database/foxx/api/lib/result.js create mode 100644 core/database/foxx/api/models/repositories/base_repository.js create mode 100644 core/database/foxx/api/models/repositories/repositories.js create mode 100644 core/database/foxx/api/models/repositories/repository/globus.js create mode 100644 core/database/foxx/api/models/repositories/repository/metadata.js create mode 100644 core/database/foxx/api/models/repositories/types.js create mode 100644 core/database/foxx/api/models/repositories/validation.js create mode 100644 core/database/foxx/api/schemas/repo.js create mode 100644 core/database/foxx/api/schemas/response_envelope.js create mode 100644 core/database/foxx/api/schemas/task.js create mode 100644 core/database/foxx/tests/base_repository.test.js create mode 100644 core/database/foxx/tests/query_router.test.js create mode 100644 core/database/foxx/tests/repo_globus.test.js create mode 100644 core/database/foxx/tests/repo_metadata.test.js create mode 100644 core/database/foxx/tests/repo_router.test.js create mode 100644 core/database/foxx/tests/repositories.test.js create mode 100644 core/database/foxx/tests/tag_router.test.js create mode 100644 core/database/foxx/tests/task_router.test.js create mode 100644 core/database/foxx/tests/validation.test.js create mode 100644 core/database/foxx/tests/version_router.test.js create mode 100644 core/server/tests/unit/test_DatabaseAPI.cpp delete mode 100644 docker/Dockerfile.dependencies create mode 160000 external/DataFedDependencies delete mode 160000 external/protobuf delete mode 100644 python/datafed_pkg/requirements.txt delete mode 100644 repository/docker/000-default.conf delete mode 100644 repository/gridftp/globus5/authz/tests/mock/CMakeLists.txt delete mode 100755 repository/gridftp/globus5/authz/tests/mock/mock_start.sh create mode 100755 scripts/compose_generate_web_server_globus_credentials.sh delete mode 100644 
scripts/dependency_install_functions.sh create mode 100644 scripts/globus/generate_web_server_credentials.py delete mode 100755 scripts/install_authz_dependencies.sh delete mode 100755 scripts/install_client_dependencies.sh delete mode 100755 scripts/install_core_dependencies.sh delete mode 100755 scripts/install_dependencies.sh delete mode 100755 scripts/install_docker_dependencies.sh delete mode 100755 scripts/install_docs_dependencies.sh delete mode 100755 scripts/install_end_to_end_test_dependencies.sh delete mode 100755 scripts/install_gcs.sh delete mode 100755 scripts/install_python_client_dependencies.sh delete mode 100755 scripts/install_repo_dependencies.sh delete mode 100755 scripts/install_ws_dependencies.sh delete mode 100755 scripts/utils.sh create mode 100644 tests/CMakeLists.txt create mode 100644 tests/mock/CMakeLists.txt create mode 100755 tests/mock/mock_start.sh rename {repository/gridftp/globus5/authz/tests => tests}/mock/mock_stop.sh (60%) rename {repository/gridftp/globus5/authz/tests => tests}/mock/test_getVersion.cpp (93%) rename {repository/gridftp/globus5/authz => tests}/mock_core/AuthMap.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/AuthMap.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/AuthenticationManager.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/AuthenticationManager.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/CMakeLists.txt (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/ClientWorker.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/ClientWorker.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/Condition.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/Condition.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/Config.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/Config.hpp (100%) rename 
{repository/gridftp/globus5/authz => tests}/mock_core/IMockCoreServer.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/MockCoreServer.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/MockCoreServer.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/MockGlobals.cpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/MockGlobals.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/PublicKeyTypes.hpp (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/Version.hpp.in (100%) rename {repository/gridftp/globus5/authz => tests}/mock_core/main.cpp (100%) diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml index 02c97851e..41c56b0cf 100644 --- a/.github/workflows/build-docs.yml +++ b/.github/workflows/build-docs.yml @@ -40,7 +40,7 @@ jobs: ./scripts/generate_datafed.sh - name: Install documentation build dependencies run: | - sudo ./scripts/install_docs_dependencies.sh + sudo ./external/DataFedDependencies/scripts/install_docs_dependencies.sh - name: Build documentation run: | cmake -S. -B build -DBUILD_AUTHZ=OFF -DBUILD_CORE_SERVER=OFF -DBUILD_COMMON=OFF -DBUILD_DOCS=ON -DBUILD_FOXX=OFF -DBUILD_REPO_SERVER=OFF -DBUILD_PYTHON_CLIENT=ON -DBUILD_TESTS=OFF -DBUILD_WEB_SERVER=OFF -DENABLE_UNIT_TESTS=OFF diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index fdf076171..b59915ac1 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -10,9 +10,12 @@ jobs: - name: Update debian run: apt update - name: Install dependencies + with: + submodules: recursive + fetch-depth: 0 run: | ./scripts/generate_datafed.sh - ./scripts/install_core_dependencies.sh + ./external/DataFedDependencies/scripts/install_core_dependencies.sh - name: Build run: | /opt/datafed/dependencies/bin/cmake -S. 
-B build -DCMAKE_BUILD_TYPE=Debug -DBUILD_WEB_SERVER=OFF diff --git a/.gitignore b/.gitignore index 420465cb9..b5450c6c5 100644 --- a/.gitignore +++ b/.gitignore @@ -53,7 +53,8 @@ scripts/admin_datafed_backup.sh scripts/admin_refresh_certs.sh scripts/globus/__pycache__ services/ -tmp/ +tests/mock_core/Version.hpp +tmp web/SDMS.proto web/SDMS_Anon.proto web/SDMS_Auth.proto @@ -65,6 +66,8 @@ web/package-lock.json web/static/datafed-core-key.pub *.swp *.swo +*.tar.gz +*.tgz # for web tests tests/end-to-end/web-UI/node_modules/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1aa273329..d3c5504aa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,6 +18,7 @@ include: - local: .gitlab/stage_pipeline_serialize.yml - local: .gitlab/infrastructure.yml - local: .gitlab/stage_clear_cache.yml + - local: .gitlab/stage_base_image_check.yml - local: .gitlab/stage_build_base.yml - local: .gitlab/stage_provision_client.yml - local: .gitlab/stage_image_check.yml @@ -32,6 +33,7 @@ stages: - trigger-infrastructure - signal - clear-docker-cache + - base-image-check - build-base - provision-client - image-check @@ -56,3 +58,4 @@ variables: REGISTRY: "camden.ornl.gov" DATAFED_DEPENDENCIES_INSTALL_PATH: "/shared/install" DOCKER_TLS_CERTDIR: "" # Required for running docker in docker + GIT_SUBMODULE_STRATEGY: recursive diff --git a/.gitlab/build/build_core_image.yml b/.gitlab/build/build_core_image.yml index 1e162cabc..fcc80d443 100644 --- a/.gitlab/build/build_core_image.yml +++ b/.gitlab/build/build_core_image.yml @@ -1,53 +1,53 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-core: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "core" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "core/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-core - - docker - rules: - - changes: - - 
docker/**/* - - scripts/**/* - - core/**/* - - common/**/* - - CMakeLists.txt - - cmake/**/* - - .gitlab-ci.yml - when: on_success + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "core" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "core/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-core + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - core/**/* + - common/**/* + - CMakeLists.txt + - cmake/**/* + - .gitlab-ci.yml + when: on_success retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "core" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - core/**/* - - common/**/* - - CMakeLists.txt - - cmake/**/* - - .gitlab-ci.yml - when: never - - when: on_success + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "core" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - core/**/* + - common/**/* + - CMakeLists.txt + - cmake/**/* + - .gitlab-ci.yml + when: never + - when: on_success diff --git a/.gitlab/build/build_foxx_image.yml b/.gitlab/build/build_foxx_image.yml index 076bc0c7c..c0b45a74a 100644 --- a/.gitlab/build/build_foxx_image.yml +++ b/.gitlab/build/build_foxx_image.yml @@ -1,56 +1,56 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-foxx: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "foxx" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "docker/Dockerfile.foxx" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed 
by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - cmake/**/* - - core/database/**/* - - core/CMakeLists.txt - - common/proto/**/* - - .gitlab-ci.yml - - .gitlab/**/* - - CMakeLists.txt - when: on_success + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "foxx" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "docker/Dockerfile.foxx" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - cmake/**/* + - core/database/**/* + - core/CMakeLists.txt + - common/proto/**/* + - .gitlab-ci.yml + - .gitlab/**/* + - CMakeLists.txt + when: on_success retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "foxx" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - cmake/**/* - - core/database/**/* - - core/CMakeLists.txt - - common/proto/**/* - - .gitlab-ci.yml - - .gitlab/**/* - - CMakeLists.txt - when: never - - when: on_success + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "foxx" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - cmake/**/* + - core/database/**/* + - core/CMakeLists.txt + - common/proto/**/* + - .gitlab-ci.yml + - .gitlab/**/* + - CMakeLists.txt + when: never + - when: on_success diff --git a/.gitlab/build/build_gcs_base_image.yml b/.gitlab/build/build_gcs_base_image.yml index 5661df305..a437f7502 100644 --- a/.gitlab/build/build_gcs_base_image.yml +++ b/.gitlab/build/build_gcs_base_image.yml @@ -1,74 +1,73 @@ --- 
stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-gcs-base: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs-base" - GIT_SUBMODULE_STRATEGY: recursive - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - GCS_BASE_IMAGE_DISTRO: "debian-12" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - when: on_success - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - source "scripts/dependency_versions.sh" - - cd "external/globus-connect-server-deploy/docker" - - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" - - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - cd "${CI_PROJECT_DIR}" - - | - while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." 
- docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs-base" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + GCS_BASE_IMAGE_DISTRO: "debian-12" + tags: + - ci-datafed-globus + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/**/* + - .gitlab-ci.yml + - CMakeLists.txt + - cmake/**/* + when: on_success + script: + - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') + - echo "$BRANCH_LOWER" + - source "scripts/dependency_versions.sh" + - cd "external/globus-connect-server-deploy/docker" + - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" + - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" + - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" + - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" + - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" + - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + - cd "${CI_PROJECT_DIR}" + - | + while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do + echo "Artifact missing from harbor..." 
+ docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + sleep 5 # Optional: Add a sleep to avoid busy waiting + done + - cat "${CI_PROJECT_DIR}/harbor_check.log" retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs-base" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - when: never - - when: on_success + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs-base" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/**/* + - .gitlab-ci.yml + - CMakeLists.txt + - cmake/**/* + when: never + - when: on_success diff --git a/.gitlab/build/build_gcs_image.yml b/.gitlab/build/build_gcs_image.yml index 5acf0b47a..4d53e04eb 100644 --- a/.gitlab/build/build_gcs_image.yml +++ b/.gitlab/build/build_gcs_image.yml @@ -1,86 +1,88 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-gcs: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - .gitlab/build/build_gcs_base_image.sh - - .gitlab/build/build_gcs_image.sh - - .gitlab/stage_build_base.sh - - external/globus-connect-server/**/* - - CMakeLists.txt - - cmake/**/* - - repository/docker/entrypoint_authz.sh - - 
repository/docker/Dockerfile.gcs - - repository/CMakeLists.txt - - repository/gridftp/**/* - when: on_success - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - "${CI_PROJECT_DIR}/scripts/generate_datafed.sh" - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker build --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies-${BRANCH_LOWER}:latest" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" --build-arg GCS_IMAGE="${REGISTRY}/datafed/gcs-base-${BRANCH_LOWER}:latest" -f repository/docker/Dockerfile.gcs -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . - - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - | - while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." 
- docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-globus + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/**/* + - .gitlab-ci.yml + - .gitlab/build/build_gcs_base_image.sh + - .gitlab/build/build_gcs_image.sh + - .gitlab/stage_build_base.sh + - external/globus-connect-server/**/* + - CMakeLists.txt + - cmake/**/* + - repository/docker/entrypoint_authz.sh + - repository/docker/Dockerfile.gcs + - repository/CMakeLists.txt + - repository/gridftp/**/* + when: on_success + script: + - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') + - echo "$BRANCH_LOWER" + - "${CI_PROJECT_DIR}/scripts/generate_datafed.sh" + - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - DOWNSTREAM_SHA=$( git submodule status ./external/DataFedDependencies/ | awk '{print $1}' ) + - DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} + - docker build --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies:$DOWNSTREAM_SHA" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" --build-arg GCS_IMAGE="${REGISTRY}/datafed/gcs-base-${BRANCH_LOWER}:latest" -f repository/docker/Dockerfile.gcs -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . 
+ - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" + - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" + - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" + - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + - | + while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do + echo "Artifact missing from harbor..." + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + sleep 5 # Optional: Add a sleep to avoid busy waiting + done + - cat "${CI_PROJECT_DIR}/harbor_check.log" retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - .gitlab/build/build_gcs_base_image.sh - - .gitlab/build/build_gcs_image.sh - - .gitlab/stage_build_base.sh - - external/globus-connect-server/**/* - - CMakeLists.txt - - cmake/**/* - - repository/docker/entrypoint_authz.sh - - repository/docker/Dockerfile.gcs - - repository/CMakeLists.txt - - repository/gridftp/**/* - when: never - - when: on_success + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-globus + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/**/* 
+ - .gitlab-ci.yml + - .gitlab/build/build_gcs_base_image.sh + - .gitlab/build/build_gcs_image.sh + - .gitlab/stage_build_base.sh + - external/globus-connect-server/**/* + - CMakeLists.txt + - cmake/**/* + - repository/docker/entrypoint_authz.sh + - repository/docker/Dockerfile.gcs + - repository/CMakeLists.txt + - repository/gridftp/**/* + when: never + - when: on_success diff --git a/.gitlab/build/build_repo_image.yml b/.gitlab/build/build_repo_image.yml index 059b30745..9ee9fd4b5 100644 --- a/.gitlab/build/build_repo_image.yml +++ b/.gitlab/build/build_repo_image.yml @@ -1,56 +1,55 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-repo: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "repo" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "repository/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-repo - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - - repository/CMakeLists.txt - - repository/server/**/* - when: on_success + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "repo" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "repository/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-repo + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/proto/**/* + - .gitlab-ci.yml + - CMakeLists.txt + - cmake/**/* + - repository/CMakeLists.txt + - repository/server/**/* + when: on_success retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "repo" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - 
tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - - repository/CMakeLists.txt - - repository/server/**/* - when: never - - when: on_success - + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "repo" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - common/proto/**/* + - .gitlab-ci.yml + - CMakeLists.txt + - cmake/**/* + - repository/CMakeLists.txt + - repository/server/**/* + when: never + - when: on_success diff --git a/.gitlab/build/build_ws_image.yml b/.gitlab/build/build_ws_image.yml index 6c710caa0..9ad5767c2 100644 --- a/.gitlab/build/build_ws_image.yml +++ b/.gitlab/build/build_ws_image.yml @@ -1,57 +1,57 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-ws: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "ws" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "web/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "TRUE" - INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile - INTERMEDIATE_LAYER_NAME: "build" - tags: - - ci-datafed-core - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - web/**/* - - cmake/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - when: on_success + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "ws" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "web/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "TRUE" + INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile + INTERMEDIATE_LAYER_NAME: "build" + tags: + - 
ci-datafed-core + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - web/**/* + - cmake/**/* + - common/proto/**/* + - .gitlab-ci.yml + - CMakeLists.txt + when: on_success retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "ws" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "TRUE" - INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile - INTERMEDIATE_LAYER_NAME: "build" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - web/**/* - - cmake/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - when: never - - when: on_success + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "ws" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "TRUE" + INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile + INTERMEDIATE_LAYER_NAME: "build" + tags: + - docker + rules: + - changes: + - docker/**/* + - scripts/**/* + - web/**/* + - cmake/**/* + - common/proto/**/* + - .gitlab-ci.yml + - CMakeLists.txt + when: never + - when: on_success diff --git a/.gitlab/build/force_build_core_image.yml b/.gitlab/build/force_build_core_image.yml index 4458ada62..1520cd505 100644 --- a/.gitlab/build/force_build_core_image.yml +++ b/.gitlab/build/force_build_core_image.yml @@ -1,20 +1,20 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-core: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "core" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "core/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-core - - docker + extends: .docker_build_script + stage: build + variables: + PROJECT: 
"datafed" + COMPONENT: "core" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "core/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-core + - docker diff --git a/.gitlab/build/force_build_dependencies_image.yml b/.gitlab/build/force_build_dependencies_image.yml new file mode 100644 index 000000000..e44f3bda2 --- /dev/null +++ b/.gitlab/build/force_build_dependencies_image.yml @@ -0,0 +1,71 @@ +--- +stages: + - build + +build-dependencies: + stage: build + tags: + - ci-datafed-core + - docker + script: + - | + # Variables you need set in CI/CD Settings or here + DOWNSTREAM_SHA=$(git submodule status ./external/DataFedDependencies/ | awk '{print $1}') # Commit SHA to trigger on + # Remove leading '-' if one exists + DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} + echo "DataFedDependencies current commit: $DOWNSTREAM_SHA" + # Trigger the downstream pipeline + echo "Triggering downstream pipeline... $CI_API_V4_URL/projects/$DATAFED_DEPENDENCIES_GITLAB_PROJECT_ID/trigger/pipeline" + PIPELINE=$(curl --silent --fail --show-error -X POST \ + -F "token=$CI_JOB_TOKEN" \ + -F "ref=main" \ + --form "variables[UPSTREAM_DATAFED_DEPENDENCIES_SUBMODULE_SHA]=$DOWNSTREAM_SHA" \ + "$CI_API_V4_URL/projects/$DATAFED_DEPENDENCIES_GITLAB_PROJECT_ID/trigger/pipeline" ) + PIPELINE_ID=$(echo "$PIPELINE" | jq -r '.id') + if [ "$PIPELINE_ID" = "null" ] || [ -z "$PIPELINE_ID" ]; then + echo "❌ Failed to create pipeline" + echo "$PIPELINE" + exit 1 + fi + echo "✅ Triggered pipeline $PIPELINE_ID for commit $DOWNSTREAM_SHA" + # Wait for the downstream pipeline to complete + echo "⏳ Waiting for downstream pipeline to complete..." 
+ TIMEOUT=3600 # 1 hour timeout + ELAPSED=0 + POLL_INTERVAL=30 # Check every 30 seconds + + while [ $ELAPSED -lt $TIMEOUT ]; do + # Get pipeline status + PIPELINE_STATUS=$(curl --silent --fail --show-error \ + --header "PRIVATE-TOKEN: $GITLAB_DATAFED_DEPENDENCIES_REPO_API_TOKEN" \ + "$CI_API_V4_URL/projects/$DATAFED_DEPENDENCIES_GITLAB_PROJECT_ID/pipelines/$PIPELINE_ID" | jq -r '.status') + + echo "Pipeline $PIPELINE_ID status: $PIPELINE_STATUS (elapsed: ${ELAPSED}s)" + + case "$PIPELINE_STATUS" in + "success") + echo "✅ Downstream pipeline completed successfully!" + exit 0 + ;; + "failed"|"canceled"|"skipped") + echo "❌ Downstream pipeline failed with status: $PIPELINE_STATUS" + echo "Pipeline URL: $CI_SERVER_URL/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/-/pipelines/$PIPELINE_ID" + exit 1 + ;; + "created"|"waiting_for_resource"|"preparing"|"pending"|"running") + # Pipeline is still in progress, continue waiting + sleep $POLL_INTERVAL + ELAPSED=$((ELAPSED + POLL_INTERVAL)) + ;; + *) + echo "⚠️ Unknown pipeline status: $PIPELINE_STATUS" + sleep $POLL_INTERVAL + ELAPSED=$((ELAPSED + POLL_INTERVAL)) + ;; + esac + done + + # If we reach here, the timeout was exceeded + echo "❌ Timeout waiting for downstream pipeline to complete after ${TIMEOUT}s" + echo "Pipeline URL: $CI_SERVER_URL/$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/-/pipelines/$PIPELINE_ID" + exit 1 diff --git a/.gitlab/build/force_build_foxx_image.yml b/.gitlab/build/force_build_foxx_image.yml index afe90f1e6..2942aa700 100644 --- a/.gitlab/build/force_build_foxx_image.yml +++ b/.gitlab/build/force_build_foxx_image.yml @@ -1,19 +1,19 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-foxx: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "foxx" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "docker/Dockerfile.foxx" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" 
- tags: - - docker + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "foxx" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "docker/Dockerfile.foxx" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker diff --git a/.gitlab/build/force_build_gcs_base_image.yml b/.gitlab/build/force_build_gcs_base_image.yml index 06448779a..7e32fad39 100644 --- a/.gitlab/build/force_build_gcs_base_image.yml +++ b/.gitlab/build/force_build_gcs_base_image.yml @@ -1,39 +1,38 @@ --- stages: - - build + - build build-gcs-base: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs-base" - GIT_SUBMODULE_STRATEGY: recursive - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - GCS_BASE_IMAGE_DISTRO: "debian-12" - tags: - - ci-datafed-globus - - docker - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - source "scripts/dependency_versions.sh" - - cd "external/globus-connect-server-deploy/docker" - - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" - - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - cd "${CI_PROJECT_DIR}" - - | - while [ 
"$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs-base" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + GCS_BASE_IMAGE_DISTRO: "debian-12" + tags: + - ci-datafed-globus + - docker + script: + - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') + - echo "$BRANCH_LOWER" + - source "scripts/dependency_versions.sh" + - cd "external/globus-connect-server-deploy/docker" + - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" + - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" + - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" + - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" + - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" + - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + - cd "${CI_PROJECT_DIR}" + - | + while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do + echo "Artifact missing from harbor..." 
+ docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + sleep 5 # Optional: Add a sleep to avoid busy waiting + done + - cat "${CI_PROJECT_DIR}/harbor_check.log" diff --git a/.gitlab/build/force_build_gcs_image.yml b/.gitlab/build/force_build_gcs_image.yml index 4cae5d148..0a3eb8e3d 100644 --- a/.gitlab/build/force_build_gcs_image.yml +++ b/.gitlab/build/force_build_gcs_image.yml @@ -1,36 +1,37 @@ --- stages: - - build + - build build-gcs: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-globus - - docker - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - ./scripts/generate_datafed.sh - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies-${BRANCH_LOWER}:latest" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" --build-arg GCS_IMAGE="${REGISTRY}/datafed/gcs-base-${BRANCH_LOWER}:latest" -f repository/docker/Dockerfile.gcs -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . 
- - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - | - while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" - + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-globus + - docker + script: + - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') + - echo "$BRANCH_LOWER" + - ./scripts/generate_datafed.sh + - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - DOWNSTREAM_SHA=$( git submodule status ./external/DataFedDependencies/ | awk '{print $1}' ) + - DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} + - docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies:$DOWNSTREAM_SHA" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" --build-arg GCS_IMAGE="${REGISTRY}/datafed/gcs-base-${BRANCH_LOWER}:latest" -f repository/docker/Dockerfile.gcs -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . 
+ - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" + - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" + - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" + - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + - | + while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do + echo "Artifact missing from harbor..." + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" + docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" + sleep 5 # Optional: Add a sleep to avoid busy waiting + done + - cat "${CI_PROJECT_DIR}/harbor_check.log" diff --git a/.gitlab/build/force_build_repo_image.yml b/.gitlab/build/force_build_repo_image.yml index 3c21cf10e..4efab00f2 100644 --- a/.gitlab/build/force_build_repo_image.yml +++ b/.gitlab/build/force_build_repo_image.yml @@ -1,20 +1,20 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-repo: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "repo" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "repository/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-repo - - docker + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "repo" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "repository/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-repo + - docker diff --git a/.gitlab/build/force_build_ws_image.yml 
b/.gitlab/build/force_build_ws_image.yml index 0355bfbc8..d0833890a 100644 --- a/.gitlab/build/force_build_ws_image.yml +++ b/.gitlab/build/force_build_ws_image.yml @@ -1,22 +1,22 @@ --- stages: - - build + - build include: - - local: .gitlab/common.yml + - local: .gitlab/common.yml build-ws: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "ws" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "web/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "TRUE" - INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile - INTERMEDIATE_LAYER_NAME: "build" - tags: - - ci-datafed-core - - docker + extends: .docker_build_script + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "ws" + GIT_STRATEGY: clone + DOCKER_FILE_PATH: "web/docker/Dockerfile" + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "TRUE" + INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile + INTERMEDIATE_LAYER_NAME: "build" + tags: + - ci-datafed-core + - docker diff --git a/.gitlab/build/skip_build_dependencies_image.yml b/.gitlab/build/skip_build_dependencies_image.yml new file mode 100644 index 000000000..36cb4fc88 --- /dev/null +++ b/.gitlab/build/skip_build_dependencies_image.yml @@ -0,0 +1,11 @@ +--- +stages: + - build + +build-dependencies: + stage: build + tags: + - ci-datafed-core + - docker + script: + - echo "Skipping dependency build, up todate dependencies image was found for commit." 
diff --git a/.gitlab/common.yml b/.gitlab/common.yml index 5d528b00e..ee42bffbd 100644 --- a/.gitlab/common.yml +++ b/.gitlab/common.yml @@ -168,7 +168,9 @@ .build_image: &build_image | BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies-${BRANCH_LOWER}:latest" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" -f "${DOCKER_FILE_PATH}" -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . + DOWNSTREAM_SHA=$( git submodule status ./external/DataFedDependencies/ | awk '{print $1}' ) + DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} + docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies:$DOWNSTREAM_SHA" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" -f "${DOCKER_FILE_PATH}" -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" @@ -194,7 +196,9 @@ then BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${INTERMEDIATE_LAYER_NAME}-${BRANCH_LOWER}" - docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/${PROJECT}/dependencies-${BRANCH_LOWER}:latest" --build-arg RUNTIME="${REGISTRY}/${PROJECT}/runtime-${BRANCH_LOWER}:latest" --target "${INTERMEDIATE_TARGET}" -f "${DOCKER_FILE_PATH}" -t "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" . 
+ DOWNSTREAM_SHA=$( git submodule status ./external/DataFedDependencies/ | awk '{print $1}' ) + DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} + docker build --no-cache --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies:$DOWNSTREAM_SHA" --build-arg RUNTIME="${REGISTRY}/${PROJECT}/runtime-${BRANCH_LOWER}:latest" --target "${INTERMEDIATE_TARGET}" -f "${DOCKER_FILE_PATH}" -t "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" . echo "Tagging ${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest ${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" docker tag "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" diff --git a/.gitlab/end_to_end.yml b/.gitlab/end_to_end.yml index 0f111c70e..6e0fff518 100644 --- a/.gitlab/end_to_end.yml +++ b/.gitlab/end_to_end.yml @@ -53,7 +53,6 @@ end-to-end-foxx-setup: - echo "--name \"${CONTAINER_NAME}\" \\" >> "${RUN_FILE}" - echo "--user \"${USER_ID}:0\" \\" >> "${RUN_FILE}" - echo "--security-opt no-new-privileges \\" >> "${RUN_FILE}" - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> "${RUN_FILE}" - echo "-e ENABLE_FOXX_TESTS=\"TRUE\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_DATABASE_PASSWORD=\"$CI_DATAFED_DATABASE_PASSWORD\" \\" >> "${RUN_FILE}" @@ -145,7 +144,6 @@ end-to-end-core-setup: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> run_core.sh - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> run_core.sh - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> run_core.sh - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> run_core.sh - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> run_core.sh - echo "-e 
DATAFED_DATABASE_PASSWORD=\"$CI_DATAFED_DATABASE_PASSWORD\" \\" >> run_core.sh - echo "-e DATAFED_DATABASE_IP_ADDRESS_PORT=\"$CI_DATAFED_DATABASE_IP_ADDRESS_PORT\" \\" >> run_core.sh @@ -204,7 +202,6 @@ end-to-end-ws-setup: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> run_web.sh - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> run_web.sh - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> run_web.sh - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> run_web.sh - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> run_web.sh - echo "-e DATAFED_WEB_CERT_PATH=\"/opt/datafed/keys/${DATAFED_WEB_CERT_NAME}\" \\" >> run_web.sh - echo "-e DATAFED_WEB_KEY_PATH=\"/opt/datafed/keys/${DATAFED_WEB_KEY_NAME}\" \\" >> run_web.sh @@ -260,7 +257,6 @@ end-to-end-repo-setup: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> "${RUN_FILE}" - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_HTTPS_SERVER_PORT=\"443\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> "${RUN_FILE}" - echo "-e DATAFED_CORE_ADDRESS_PORT_INTERNAL=\"${CI_DATAFED_DOMAIN}:7513\" \\" >> "${RUN_FILE}" @@ -324,7 +320,6 @@ end-to-end-gcs-authz-setup: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> run_globus.sh - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> run_globus.sh - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> run_globus.sh - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> run_globus.sh - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> run_globus.sh - 
echo "-e DATAFED_HTTPS_SERVER_PORT=\"443\" \\" >> run_globus.sh - echo "-e DATAFED_DEFAULT_LOG_PATH=\"$CONTAINER_LOG_FILE_PATH\" \\" >> run_globus.sh @@ -372,7 +367,6 @@ end_to_end_client-test: variables: GIT_STRATEGY: clone DATAFED_DATABASE_HOST: "${CI_DATAFED_DATABASE_HOST}" - DATAFED_DATABASE_ZEROMQ_SYSTEM_SECRET: "${CI_DATAFED_DATABASE_ZEROMQ_SYSTEM_SECRET}" DATAFED_DATABASE_PASSWORD: "${CI_DATAFED_DATABASE_PASSWORD}" DATAFED_USER89_PASSWORD: "${CI_DATAFED_USER89_PASSWORD}" DATAFED_USER89_GLOBUS_REFRESH_TOKEN: "${CI_DATAFED_USER89_GLOBUS_REFRESH_TOKEN}" @@ -382,9 +376,9 @@ end_to_end_client-test: DATAFED_USER99_GLOBUS_REFRESH_TOKEN: "${CI_DATAFED_USER99_GLOBUS_REFRESH_TOKEN}" DATAFED_USER99_GLOBUS_ACCESS_TOKEN: "${CI_DATAFED_USER99_GLOBUS_ACCESS_TOKEN}" DATAFED_USER99_GLOBUS_UUID: "${CI_DATAFED_USER99_GLOBUS_UUID}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${CI_DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${CI_DATAFED_DOMAIN}" DATAFED_PYTHON_CLIENT_ALLOW_SELF_SIGNED_CERTS: "TRUE" + DATAFED_PYTHON_DEPENDENCIES_DIR: "${DATAFED_DEPENDENCIES_INSTALL_PATH}/python" stage: end-to-end-test dependencies: - end-to-end-gcs-authz-setup @@ -394,8 +388,7 @@ end_to_end_client-test: script: - export DATAFED_REPO_FORM_PATH="$(pwd)/${CI_DATAFED_REPO_ID_AND_DIR}-repo-form.json" - env > env_file - - echo "Testing" - - source /shared/install/python/datafed/bin/activate + - . /shared/install/python/datafed/bin/activate - ./scripts/generate_datafed.sh - > cmake -S. 
-B build @@ -469,7 +462,7 @@ end_to_end_error_discovery_arango: tags: - ci-datafed-arango script: - - sudo journalctl --no-pager -u arangodb3.service + - sudo journalctl --no-pager -u arangodb3.service --since "3 hours ago" end_to_end_error_discovery_gcs: needs: ["check-ci-infrastructure", "end-to-end-gcs-authz-setup", "end-to-end-signal"] diff --git a/.gitlab/stage_base_image_check.yml b/.gitlab/stage_base_image_check.yml new file mode 100644 index 000000000..37ee8510d --- /dev/null +++ b/.gitlab/stage_base_image_check.yml @@ -0,0 +1,38 @@ +--- +check-dependencies-image: + stage: base-image-check + variables: + PROJECT: "datafed" + COMPONENT: "dependencies" + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker + script: + - | + cd "$CI_PROJECT_DIR/external/DataFedDependencies" + BRANCH=$(git branch --show-current) + cd "$CI_PROJECT_DIR" + DOWNSTREAM_SHA=$(git submodule status ./external/DataFedDependencies/ | awk '{print $1}') + DOWNSTREAM_SHA="${DOWNSTREAM_SHA#-}" + docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" + FORCE_BUILD="FALSE" + echo "Pulling: ${REGISTRY}/${PROJECT}/${COMPONENT}:${DOWNSTREAM_SHA}" + set +e + docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}:${DOWNSTREAM_SHA}" + if [ $? 
-eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; + set -e + if [ "$FORCE_BUILD" == "TRUE" ] + then + cp .gitlab/build/force_build_${COMPONENT}_image.yml ${COMPONENT}_image.yml + else + cp .gitlab/build/skip_build_${COMPONENT}_image.yml ${COMPONENT}_image.yml + fi + echo "REGISTRY=${REGISTRY}" >> build.env + echo "HARBOR_USER=${HARBOR_USER}" >> build.env + echo "HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN=${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" >> build.env + sed -i 's/\(HARBOR_USER=.*\)\$/\1$$/g' build.env + artifacts: + paths: + - ${COMPONENT}_image.yml + reports: + dotenv: build.env diff --git a/.gitlab/stage_build.yml b/.gitlab/stage_build.yml index c8dabacc7..52a4c1c12 100644 --- a/.gitlab/stage_build.yml +++ b/.gitlab/stage_build.yml @@ -1,7 +1,7 @@ --- run-ws-build-job: needs: - - job: build-dependencies + - job: run-build-dependencies - job: build-runtime - job: check-ws-image artifacts: true @@ -20,7 +20,7 @@ run-core-build-job: needs: - job: build-runtime - job: check-core-image - - job: build-dependencies + - job: run-build-dependencies artifacts: true stage: build trigger: @@ -35,7 +35,7 @@ run-core-build-job: run-repo-build-job: needs: - - job: build-dependencies + - job: run-build-dependencies - job: build-runtime - job: check-repo-image artifacts: true @@ -52,7 +52,7 @@ run-repo-build-job: run-gcs-base-build-job: needs: - - job: build-dependencies + - job: run-build-dependencies - job: build-runtime - job: check-gcs-base-image artifacts: true @@ -70,7 +70,7 @@ run-gcs-base-build-job: run-gcs-build-job: needs: - - job: build-dependencies + - job: run-build-dependencies - job: build-runtime - job: run-gcs-base-build-job - job: check-gcs-image @@ -88,7 +88,7 @@ run-gcs-build-job: run-foxx-build-job: needs: - - job: build-dependencies + - job: run-build-dependencies - job: build-runtime - job: check-foxx-image artifacts: true diff --git a/.gitlab/stage_build_base.yml b/.gitlab/stage_build_base.yml index adcf5e993..842728e75 100644 --- 
a/.gitlab/stage_build_base.yml +++ b/.gitlab/stage_build_base.yml @@ -2,17 +2,20 @@ include: - local: .gitlab/common.yml -build-dependencies: - extends: .docker_base_build_script +run-build-dependencies: + needs: + - job: check-dependencies-image + artifacts: true stage: build-base + trigger: + include: + - artifact: dependencies_image.yml + job: check-dependencies-image + strategy: depend variables: - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - PROJECT: "datafed" - COMPONENT: "dependencies" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "docker/Dockerfile.dependencies" - tags: - - docker + REGISTRY: "${REGISTRY}" + HARBOR_USER: "${HARBOR_USER}" + HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN: "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" build-runtime: extends: .docker_base_build_script diff --git a/.gitlab/stage_provision_client.yml b/.gitlab/stage_provision_client.yml index 3e2404429..2e7cdbd41 100644 --- a/.gitlab/stage_provision_client.yml +++ b/.gitlab/stage_provision_client.yml @@ -6,14 +6,15 @@ provision-client: needs: ["signal"] variables: GIT_STRATEGY: clone - DATAFED_PYTHON_DEPENDENCIES_DIR: "${CI_PROJECT_DIR}/dependencies/python" + DATAFED_PYTHON_DEPENDENCIES_DIR: "${DATAFED_DEPENDENCIES_INSTALL_PATH}/python" stage: provision-client tags: - ci-datafed-client before_script: - - export PATH=/opt/datafed/dependencies/bin:$PATH - - rm -rf $DATAFED_PYTHON_DEPENDENCIES_DIR + - sudo chown -R gitlab-runner:gitlab-runner "$DATAFED_DEPENDENCIES_INSTALL_PATH" + - rm -rf "$DATAFED_PYTHON_DEPENDENCIES_DIR" script: - - ./scripts/generate_datafed.sh - - ./scripts/install_client_dependencies.sh - - ./scripts/install_end_to_end_test_dependencies.sh + - export PATH="/opt/datafed/dependencies/bin:$DATAFED_PYTHON_DEPENDENCIES_DIR/bin:$PATH" + - ./external/DataFedDependencies/scripts/generate_dependencies_config.sh + - ./external/DataFedDependencies/scripts/install_client_dependencies.sh + - 
./external/DataFedDependencies/scripts/install_end_to_end_test_dependencies.sh diff --git a/.gitlab/stage_unit.yml b/.gitlab/stage_unit.yml index e46539e5b..70e4e5a98 100644 --- a/.gitlab/stage_unit.yml +++ b/.gitlab/stage_unit.yml @@ -50,7 +50,6 @@ run-ws-unit-job: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> run_web.sh - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> run_web.sh - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> run_web.sh - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> run_web.sh - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> run_web.sh - echo "-e DATAFED_WEB_CERT_PATH=\"/opt/datafed/keys/${DATAFED_WEB_CERT_NAME}\" \\" >> run_web.sh - echo "-e DATAFED_WEB_KEY_PATH=\"/opt/datafed/keys/${DATAFED_WEB_KEY_NAME}\" \\" >> run_web.sh @@ -103,7 +102,6 @@ run-authz-unit-job: - echo "-e DATAFED_GLOBUS_APP_SECRET=\"$CI_DATAFED_GLOBUS_APP_SECRET\" \\" >> run_globus.sh - echo "-e DATAFED_GLOBUS_APP_ID=\"$CI_DATAFED_GLOBUS_APP_ID\" \\" >> run_globus.sh - echo "-e DATAFED_ZEROMQ_SESSION_SECRET=\"$CI_DATAFED_ZEROMQ_SESSION_SECRET\" \\" >> run_globus.sh - - echo "-e DATAFED_ZEROMQ_SYSTEM_SECRET=\"$CI_DATAFED_ZEROMQ_SYSTEM_SECRET\" \\" >> run_globus.sh - echo "-e DATAFED_DOMAIN=\"$CI_DATAFED_DOMAIN\" \\" >> run_globus.sh - echo "-e DATAFED_HTTPS_SERVER_PORT=\"443\" \\" >> run_globus.sh - echo "-e DATAFED_DEFAULT_LOG_PATH=\"$CONTAINER_LOG_FILE_PATH\" \\" >> run_globus.sh diff --git a/.gitmodules b/.gitmodules index a7921d851..2f55771cf 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,6 @@ [submodule "external/globus-connect-server-deploy"] path = external/globus-connect-server-deploy url = https://github.com/globus/globus-connect-server-deploy.git -[submodule "external/protobuf"] - path = external/protobuf - url = https://github.com/protocolbuffers/protobuf.git +[submodule "external/DataFedDependencies"] + path = 
external/DataFedDependencies + url = https://github.com/ORNL/DataFedDependencies.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 72c004182..ac472ce8e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,7 @@ OPTION(BUILD_PYTHON_CLIENT "Build python client" TRUE) OPTION(BUILD_TESTS "Build Tests" TRUE) OPTION(BUILD_WEB_SERVER "Build DataFed Web Server" TRUE) OPTION(ENABLE_UNIT_TESTS "Enable unit tests" TRUE) +OPTION(ENABLE_INTEGRATION_TESTS "Enable integration tests" TRUE) OPTION(ENABLE_MEMORY_TESTS "Enable memory tests" FALSE) OPTION(BUILD_SHARED_LIBS "By default DataFed tries to build static libraries with the exception of libdatafed-authz which must always be a shared library, @@ -52,7 +53,14 @@ if(NOT EXISTS ${DATAFED_CONFIG_SH}) "${PROJECT_SOURCE_DIR}/scripts/generate_datafed.sh") endif() -file(READ "${PROJECT_SOURCE_DIR}/scripts/dependency_versions.sh" DEPENDENCY_VERSIONS) +set(DATAFED_DEPENDENCIES_SH "${DataFed_SOURCE_DIR}/external/DataFedDependencies/config/dependencies.sh") +if(NOT EXISTS ${DATAFED_DEPENDENCIES_SH}) + message(FATAL_ERROR "Error: File '${DATAFED_DEPENDENCIES_SH}' does not exist. " + "Please run generate_dependencies.sh first to populate defaults." 
+ "${PROJECT_SOURCE_DIR}/external/DataFedDependencies/scripts/generate_datafed.sh") +endif() + +file(READ "${PROJECT_SOURCE_DIR}/external/DataFedDependencies/scripts/dependency_versions.sh" DEPENDENCY_VERSIONS) get_version_from_script(${DEPENDENCY_VERSIONS} "DATAFED_DYNAMIC_LIBRARY_PROTOBUF_VERSION" PROTOBUF_LIBRARY_VERSION) get_version_from_script(${DEPENDENCY_VERSIONS} "DATAFED_PROTOBUF_VERSION" PROTOBUF_COMPILER_VERSION) @@ -72,11 +80,10 @@ endif() if(NOT DEFINED DATAFED_DOMAIN) get_value_from_datafed_sh("DATAFED_DOMAIN" DATAFED_DOMAIN) - message("TESTING: DATAFED_DOMAIN : ${DATAFED_DOMAIN}") endif() if(NOT DEFINED DATAFED_DEPENDENCIES_INSTALL_PATH) - get_value_from_datafed_sh("DATAFED_DEPENDENCIES_INSTALL_PATH" DEPENDENCY_INSTALL_PATH) + get_value_from_dependencies_sh("DATAFED_DEPENDENCIES_INSTALL_PATH" DEPENDENCY_INSTALL_PATH) endif() set(CMAKE_PREFIX_PATH "${DEPENDENCY_INSTALL_PATH}") @@ -181,12 +188,19 @@ if( BUILD_FOXX OR BUILD_CORE_SERVER ) add_subdirectory( core ) endif() +# Must occur before building authz, location of mock keys are defined here +# ENV DATAFED_MOCK_CORE_PUB_KEY +if (ENABLE_INTEGRATION_TESTS OR ENABLE_END_TO_END_TESTS) + add_subdirectory(tests) +endif() + if( BUILD_REPO_SERVER OR BUILD_AUTHZ) add_subdirectory( repository ) endif() if( BUILD_PYTHON_CLIENT ) # make target = pydatafed + file(COPY ${PROJECT_SOURCE_DIR}/external/DataFedDependencies/python/datafed_pkg/requirements.txt DESTINATION ${PROJECT_SOURCE_DIR}/python/datafed_pkg/requirements.txt) add_subdirectory( python EXCLUDE_FROM_ALL ) endif() @@ -225,32 +239,14 @@ if( INSTALL_FOXX ) OUTPUT_VARIABLE _out ERROR_VARIABLE _err RESULT_VARIABLE _res) - If (NOT \${_res} EQUAL \"0\") + if (NOT \${_res} EQUAL \"0\") message( FATAL_ERROR \"out: \${_out} install_foxx failed: \${_err}\") + else() + message( \"\${_out} install_foxx completed: \${_err}\") endif()" ) endif() -if (ENABLE_END_TO_END_TESTS) - if (NOT ENABLE_END_TO_END_API_TESTS AND NOT ENABLE_END_TO_END_WEB_TESTS) - message 
(FATAL_ERROR "Cannot run end-to-end tests because one of the ENABLE_END_TO_END web or API flag is not on") - else() - add_subdirectory(tests/end-to-end) - add_subdirectory(tests/end-to-end/web-UI) - endif() -else() - if (ENABLE_END_TO_END_API_TESTS) - if (BUILD_PYTHON_CLIENT) - add_subdirectory(tests/end-to-end) - else() - message (FATAL_ERROR "Cannot run end-to-end tests because BUILD_PYTHON_CLIENT is not on") - endif() - endif() - if (ENABLE_END_TO_END_WEB_TESTS) - add_subdirectory(tests/end-to-end/web-UI) - endif() -endif() - find_library(SODIUM NAMES libsodium.a PATHS ${DEPENDENCY_INSTALL_PATH}/lib) # This is not needed but it is used by zmq I think so we want to print what is found locally just in case diff --git a/cmake/Boost.cmake b/cmake/Boost.cmake index f32fb16f1..a6e2c1250 100644 --- a/cmake/Boost.cmake +++ b/cmake/Boost.cmake @@ -1,4 +1,9 @@ +# Allow using legacy Boost find_package call. +if(POLICY CMP0167) + cmake_policy(SET CMP0167 OLD) +endif() + function(find_boost_library) if ( DEFINED ENV{BOOST_INC} AND DEFINED ENV{BOOST_LIB} ) message( "BOOST_INC:" $ENV{BOOST_INC} ) diff --git a/cmake/Utils.cmake b/cmake/Utils.cmake index 9ae7fabab..e4f8e6819 100644 --- a/cmake/Utils.cmake +++ b/cmake/Utils.cmake @@ -45,3 +45,24 @@ function(get_value_from_datafed_sh INPUT_KEY OUTPUT_VALUE) set(${OUTPUT_VALUE} "${OUTPUT_VAR}" PARENT_SCOPE) endfunction() +# Function will get exported value from a shell script +# +# i.e. 
if dependencies.sh has +# +# dependencies.sh +# export MY_NAME="Barry" +# +# set(DATAFED_CONFIG_SH "external/DataFedDependencies/config/dependencies.sh") +# get_value_from_dependencies_sh "MY_NAME" name) +# message("$name") +# +# Will output "Barry" +function(get_value_from_dependencies_sh INPUT_KEY OUTPUT_VALUE) + execute_process( + COMMAND bash "-c" "source ${DATAFED_CONFIG_SH} && echo \$${INPUT_KEY}" + OUTPUT_VARIABLE OUTPUT_VAR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + set(${OUTPUT_VALUE} "${OUTPUT_VAR}" PARENT_SCOPE) +endfunction() + diff --git a/cmake/Version.cmake b/cmake/Version.cmake index 7e2f65df7..262d3deb7 100644 --- a/cmake/Version.cmake +++ b/cmake/Version.cmake @@ -1,45 +1,45 @@ set(DATAFED_RELEASE_YEAR 2025) -set(DATAFED_RELEASE_MONTH 10) -set(DATAFED_RELEASE_DAY 7) +set(DATAFED_RELEASE_MONTH 11) +set(DATAFED_RELEASE_DAY 17) set(DATAFED_RELEASE_HOUR 14) -set(DATAFED_RELEASE_MINUTE 1) +set(DATAFED_RELEASE_MINUTE 0) set(DATAFED_COMMON_LIB_MAJOR 1) set(DATAFED_COMMON_LIB_MINOR 0) -set(DATAFED_COMMON_LIB_PATCH 2) +set(DATAFED_COMMON_LIB_PATCH 3) set(DATAFED_COMMON_PROTOCOL_API_MAJOR 1) -set(DATAFED_COMMON_PROTOCOL_API_MINOR 1) +set(DATAFED_COMMON_PROTOCOL_API_MINOR 2) set(DATAFED_COMMON_PROTOCOL_API_PATCH 0) set(DATAFED_CORE_MAJOR 1) -set(DATAFED_CORE_MINOR 0) -set(DATAFED_CORE_PATCH 2) +set(DATAFED_CORE_MINOR 1) +set(DATAFED_CORE_PATCH 0) set(DATAFED_FOXX_MAJOR 1) -set(DATAFED_FOXX_MINOR 1) +set(DATAFED_FOXX_MINOR 2) set(DATAFED_FOXX_PATCH 0) set(DATAFED_FOXX_API_MAJOR 1) -set(DATAFED_FOXX_API_MINOR 1) +set(DATAFED_FOXX_API_MINOR 2) set(DATAFED_FOXX_API_PATCH 0) set(DATAFED_WEB_MAJOR 1) set(DATAFED_WEB_MINOR 1) -set(DATAFED_WEB_PATCH 0) +set(DATAFED_WEB_PATCH 1) set(DATAFED_REPO_MAJOR 1) set(DATAFED_REPO_MINOR 0) -set(DATAFED_REPO_PATCH 0) +set(DATAFED_REPO_PATCH 1) set(DATAFED_AUTHZ_MAJOR 1) set(DATAFED_AUTHZ_MINOR 0) -set(DATAFED_AUTHZ_PATCH 1) +set(DATAFED_AUTHZ_PATCH 2) set(DATAFED_PYTHON_CLIENT_MAJOR 3) -set(DATAFED_PYTHON_CLIENT_MINOR 0) 
-set(DATAFED_PYTHON_CLIENT_PATCH 3) +set(DATAFED_PYTHON_CLIENT_MINOR 1) +set(DATAFED_PYTHON_CLIENT_PATCH 0) set(DATAFED_PYTHON_CLIENT_RELEASE_TYPE "") set(DATAFED_PYTHON_CLIENT_PRE_RELEASE_IDENTIFER "") diff --git a/common/proto/common/SDMS.proto b/common/proto/common/SDMS.proto index f173cdde1..14c2c984a 100644 --- a/common/proto/common/SDMS.proto +++ b/common/proto/common/SDMS.proto @@ -96,6 +96,11 @@ enum AccessTokenType { ACCESS_SENTINEL = 255; } +// Used to specify how the work is being done on the servers. +enum ExecutionMethod { + DIRECT = 1; + DEFERRED = 2; +} // ============================ Data Structures // Allocation-specific statistics data @@ -115,10 +120,11 @@ message AllocData required uint64 data_size = 3; required uint32 rec_limit = 4; required uint32 rec_count = 5; - required string path = 6; + optional string path = 6; optional string id = 7; optional bool is_def = 8; optional AllocStatsData stats = 9; + optional string repo_type = 10; } // For viewing dependencies @@ -318,6 +324,7 @@ message RepoData optional string domain = 9; optional string exp_path = 10; repeated string admin = 11; + optional string type = 12; } diff --git a/common/proto/common/SDMS_Auth.proto b/common/proto/common/SDMS_Auth.proto index 861c570e3..71fbf293c 100644 --- a/common/proto/common/SDMS_Auth.proto +++ b/common/proto/common/SDMS_Auth.proto @@ -916,13 +916,14 @@ message RepoCreateRequest required string title = 2; // Title optional string desc = 3; // Description optional string domain = 5; // RESERVED - required string path = 6; // Path to storage directories + optional string path = 6; // Path to storage directories optional string exp_path = 7; // RESERVED - required string address = 8; // Repo server address - required string endpoint = 9; // Globus endpoint UUID or legacy name - required string pub_key = 10; // Public encryption key + optional string address = 8; // Repo server address + optional string endpoint = 9; // Globus endpoint UUID or legacy name + 
optional string pub_key = 10; // Public encryption key required uint64 capacity = 11; // Total data capacity repeated string admin = 12; // Repo admin(s) + optional string type = 13; // Repository type (defaults to "globus") } // Request to update an existing repository. Only system or repos admins may @@ -942,6 +943,7 @@ message RepoUpdateRequest optional string pub_key = 10; // Public encryption key optional uint64 capacity = 11; // Total data capacity repeated string admin = 12; // Repo admin(s) + optional string type = 13; // Repository type } // Request to delete a repository. Only system or repos admins may send this @@ -1029,8 +1031,10 @@ message RepoAllocationStatsReply // Request to create a new allocation on a repo for a user or project. // Only repo admins may make this request. On success, a background task is -// started to create the allocation. -// Reply: TaskDataReply on success, NackError on error +// started to create the allocation if it is a DataFed managed globus repo, +// for a metadata only repo it is created immediately. +// Reply: TaskDataReply (to be superseded by RepoAllocationCreateResponse) +// on success, NackError on error message RepoAllocationCreateRequest { required string repo = 1; // Repo ID @@ -1039,6 +1043,13 @@ message RepoAllocationCreateRequest required uint32 rec_limit = 4; // Data record limit(count) } +message RepoAllocationCreateResponse +{ + required SDMS.ExecutionMethod execution_method = 1; // The execution method that was used to create the allocation + optional SDMS.TaskData task = 2; // The task data if deferred execution + optional SDMS.AllocData result = 3; // The Allocation data if direct execution +} + // Request to update an existing allocation. Only repo admins may make this // request.
// Reply: AckReply on success, NackError on error diff --git a/common/tests/unit/test_Buffer.cpp b/common/tests/unit/test_Buffer.cpp index 74ff9dec8..e258b2b30 100644 --- a/common/tests/unit/test_Buffer.cpp +++ b/common/tests/unit/test_Buffer.cpp @@ -108,6 +108,70 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_non_trivial2) { BOOST_CHECK(buffer.size() == array_size); } +BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf_repo_create_request) { + + ProtoBufMap proto_map; + ProtoBufFactory proto_factory; + + SDMS::Auth::RepoCreateRequest repo_create_req; + + const std::string id = "bonanza"; + const std::string title = "All you can eat."; + const std::string path = "/"; + const std::string address = "tcp://best_burgers.com"; + const std::string endpoint = ""; + const std::string pub_key; + uint64_t capacity = 0; + const std::string type = "globus"; + + repo_create_req.set_id(id); + repo_create_req.set_title(title); + repo_create_req.set_path(path); + repo_create_req.set_address(address); + repo_create_req.set_endpoint(endpoint); + repo_create_req.set_pub_key(pub_key); + repo_create_req.set_capacity(capacity); + repo_create_req.set_type(type); + + BOOST_CHECK(repo_create_req.id().compare(id) == 0); + BOOST_CHECK(repo_create_req.title().compare(title) == 0); + BOOST_CHECK(repo_create_req.path().compare(path) == 0); + BOOST_CHECK(repo_create_req.address().compare(address) == 0); + BOOST_CHECK(repo_create_req.endpoint().compare(endpoint) == 0); + BOOST_CHECK(repo_create_req.pub_key().compare(pub_key) == 0); + BOOST_CHECK(repo_create_req.capacity() == capacity); + BOOST_CHECK(repo_create_req.type().compare(type) == 0); + + Buffer buffer; + std::cout << "Calling Copy to buffer" << std::endl; + size_t size = repo_create_req.ByteSizeLong(); + copyToBuffer(buffer, &repo_create_req, size); + + BOOST_CHECK(buffer.size() == buffer.capacity()); + BOOST_CHECK(buffer.size() == repo_create_req.ByteSizeLong()); + + // Create a new message and copy the buffer into it + uint16_t msg_type = 
proto_map.getMessageType(repo_create_req); + std::unique_ptr<::google::protobuf::Message> new_msg = + proto_factory.create(msg_type); + + copyFromBuffer(new_msg.get(), buffer); + + auto new_repo_create_req = + dynamic_cast(new_msg.get()); + + BOOST_CHECK(new_repo_create_req->id().compare(id) == 0); + BOOST_CHECK(new_repo_create_req->title().compare(title) == 0); + BOOST_CHECK(new_repo_create_req->path().compare(path) == 0); + BOOST_CHECK(new_repo_create_req->address().compare(address) == 0); + BOOST_CHECK(new_repo_create_req->endpoint().compare(endpoint) == 0); + BOOST_CHECK(new_repo_create_req->pub_key().compare(pub_key) == 0); + BOOST_CHECK(new_repo_create_req->capacity() == capacity); + BOOST_CHECK(new_repo_create_req->type().compare(type) == 0); + + +} + BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf) { ProtoBufMap proto_map; diff --git a/compose/README.md b/compose/README.md index 4bdb021ec..9952492b6 100644 --- a/compose/README.md +++ b/compose/README.md @@ -37,6 +37,40 @@ Create the .env file fill in the missing components that are required. ```bash ./generate_env.sh ``` + +Note: For the DataFed web service container in the compose instance, the SSL +certificate and key paths are now hardcoded to + +    /opt/datafed/keys/cert.crt +    /opt/datafed/keys/cert.key + +Ensure that if you are replacing the self‐signed certificates manually, the new +certificates are placed in a keys folder (DataFed/compose/metadata|all/keys) that is +mounted by the container. + +An optional flag ‘--arango-use-ssl’ (or -a) has been added to the +generate_env.sh script. 
When this flag is set, additional SSL certificates for +ArangoDB will be generated and the .env file will be populated with the +following new variables: + +   DATAFED_ARANGO_CERT_PATH +   DATAFED_ARANGO_KEY_PATH +   DATAFED_ARANGO_PEM_PATH + +Be sure you have your keys folder correctly set up in (DataFed/compose/metadata|all/keys) so that +the ArangoDB container can mount it and locate the necessary certificate files. +If not using SSL with Arango, omit this flag so that the container will run +over TCP (HTTP). + +WARNING - If you do not want to run with HTTPS but it was previously run with +HTTPS turned on, be sure to remove datafed-arango.key, datafed-arango.pem, +datafed-arango.crt from (DataFed/compose/metadata|all/keys) folder. In +addition, you may need to remove, the below variables from your .env file + +   DATAFED_ARANGO_CERT_PATH +   DATAFED_ARANGO_KEY_PATH +   DATAFED_ARANGO_PEM_PATH + ### 2. Fill in the needed .env variables for the Metadata Core Services The .env file will be created in the DataFed/compose/metadata folder and will be hidden. @@ -305,19 +339,36 @@ docker run --env-file .env \ To interact more directly with the container the '-i' flag can be added and the entrypoint file can be overwritten by including '--entrypoint /bin/bash' -## Common Errors +## Troubleshooting & Common Errors + +### Errors with HTTP and HTTPS for the ArangoDB instance + +It is advised to run the `generate_env.sh` script anytime you make changes to the +.env file. It is designed to keep existing variables while ensuring consistency +of compatible configuration arguments. + +To Turn on ssl. + +``` +./generate_env.sh --arango-use-ssl +``` + +If you want to turn it off you will have to be careful to remove the certificate +files that are located in the compose/metadata|all/keys folders. ### Errors during Compose up -Make sure all the ports that are needed are open on the local host. These +Make sure all the ports that are needed are open on the localhost. 
These include, ports -443 for the datafed-web and datafed-gcs container -7512 for the datafed-core container -50000-51000 for the datafed-gcs container -9000 for the datafed-repo container -80 for the datafed-gcs container -8512 for arangodb web server interface +| Port | Description | +|--------------|-------------------------------------------------| +| 443 | DataFed-web and DataFed-gcs container | +| 7512 | DataFed-core container | +| 50000–51000 | DataFed-gcs container | +| 9000 | DataFed-repo container | +| 80 | DataFed-gcs container | +| 8529 | ArangoDB web server interface | Make sure port 80 is not already bound on the host. Also note that the repo server keys should exist in the keys folder before running the gcs instance. diff --git a/compose/all/compose.yml b/compose/all/compose.yml index 82fb208a8..030973d8a 100644 --- a/compose/all/compose.yml +++ b/compose/all/compose.yml @@ -10,11 +10,10 @@ services: DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}" DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}" DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}" - DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}" - DATAFED_WEB_KEY_PATH: "${DATAFED_WEB_KEY_PATH}" + DATAFED_WEB_CERT_PATH: "/opt/datafed/keys/cert.crt" + DATAFED_WEB_KEY_PATH: "/opt/datafed/keys/cert.key" DATAFED_WEB_USER: "datafed" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_CORE_ADDRESS_PORT_INTERNAL: "datafed-core:7513" @@ -37,15 +36,15 @@ services: DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}" DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}" DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" - DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}" - DATAFED_WEB_KEY_PATH: 
"${DATAFED_WEB_KEY_PATH}" + DATAFED_WEB_CERT_PATH: "/opt/datafed/keys/cert.crt" + DATAFED_WEB_KEY_PATH: "/opt/datafed/keys/cert.key" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" DATAFED_DATABASE_IP_ADDRESS: "${DATAFED_DATABASE_IP_ADDRESS}" DATAFED_DATABASE_IP_ADDRESS_PORT: "${DATAFED_DATABASE_IP_ADDRESS}:${DATAFED_DATABASE_PORT}" DATAFED_CORE_LOG_LEVEL: "${DATAFED_CORE_LOG_LEVEL}" + SSL_CERT_FILE: "/opt/datafed/keys/datafed-arango.crt" user: "${DATAFED_UID}:0" security_opt: - no-new-privileges:true @@ -62,7 +61,6 @@ services: image: datafed-foxx:latest depends_on: ["arango"] environment: - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" @@ -71,6 +69,7 @@ services: # NOTE enabling foxx tests will cause state changes in the database # do not run this unless you are ok with the database being wiped. 
ENABLE_FOXX_TESTS: "${DATAFED_ENABLE_FOXX_TESTS}" + SSL_CERT_FILE: "/usr/local/share/ca-certificates/datafed-arango.crt" user: "${DATAFED_UID}:0" security_opt: - no-new-privileges:true @@ -81,6 +80,7 @@ services: retries: 20 volumes: - foxx_tmp:/tmp + - ./keys/:/usr/local/share/ca-certificates/ networks: - datafed-internal @@ -90,17 +90,24 @@ services: ARANGO_ROOT_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" volumes: - arango_db:/var/lib/arangodb3 + - ./keys/:/usr/local/share/ca-certificates/ ports: - 8529:8529 # Arangodb web UI networks: - datafed-internal + entrypoint: > + sh -c 'if [ -f /usr/local/share/ca-certificates/datafed-arango.pem ]; then + exec /entrypoint.sh --ssl.keyfile /usr/local/share/ca-certificates/datafed-arango.pem \ + --server.endpoint ssl://0.0.0.0:8529; + else + exec /entrypoint.sh --server.endpoint tcp://0.0.0.0:8529; + fi' # Needs the datafed-core to be up so it doesn't fail when trying to connect datafed-repo: depends_on: ["datafed-core"] environment: DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" @@ -127,7 +134,6 @@ services: depends_on: ["datafed-web"] environment: DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" @@ -138,7 +144,7 @@ services: DATAFED_GLOBUS_CONTROL_PORT: "${DATAFED_GLOBUS_CONTROL_PORT}" DATAFED_GCS_COLLECTION_BASE_PATH: "${DATAFED_GCS_COLLECTION_BASE_PATH}" DATAFED_GCS_COLLECTION_ROOT_PATH: "${DATAFED_GCS_COLLECTION_ROOT_PATH}" - DATAFED_REPO_USER: "${DATAFED_REPO_USER}" + DATAFED_REPO_USER: "datafed" UID: "${DATAFED_UID}" HOST_HOSTNAME: "localhost" 
DATAFED_AUTHZ_USER: "datafed" @@ -152,10 +158,6 @@ services: - ./logs:${DATAFED_CONTAINER_LOG_PATH} - ${DATAFED_HOST_COLLECTION_MOUNT}:${DATAFED_GCS_COLLECTION_ROOT_PATH}/${DATAFED_REPO_ID_AND_DIR} - # ports: - # - "8081:443" # This must be the same port that is mapped to the host for redirects to work - # - "50000-50100:50000-50100" - volumes: foxx_tmp: arango_db: diff --git a/compose/all/generate_env.sh b/compose/all/generate_env.sh index b3a172ac0..23f80a82c 100755 --- a/compose/all/generate_env.sh +++ b/compose/all/generate_env.sh @@ -4,6 +4,6 @@ SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/../../") # Variables specific to running the compose instance -export DATAFED_COMPOSE_REPO_DOMAIN="datafed-repo" +export DATAFED_REPO_DOMAIN="datafed-repo" "${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)" "$@" diff --git a/compose/all/generate_globus_files.sh b/compose/all/generate_globus_files.sh index a2df4cff5..bb37c56d4 100755 --- a/compose/all/generate_globus_files.sh +++ b/compose/all/generate_globus_files.sh @@ -1,6 +1,8 @@ #!/bin/bash -SCRIPT=$(realpath "$0") +SCRIPT=$(realpath "${BASH_SOURCE[0]}") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/../../") +set -euf -o pipefail + "${PROJECT_ROOT}/scripts/compose_generate_globus_files.sh" -d "$(pwd)" diff --git a/compose/metadata/compose.yml b/compose/metadata/compose.yml index bd1561630..3ee54ee7f 100644 --- a/compose/metadata/compose.yml +++ b/compose/metadata/compose.yml @@ -1,5 +1,3 @@ -version: "3.9" - services: datafed-web: depends_on: ["datafed-core"] @@ -7,11 +5,10 @@ services: DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}" DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}" DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}" - DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}" - 
DATAFED_WEB_KEY_PATH: "${DATAFED_WEB_KEY_PATH}" + DATAFED_WEB_CERT_PATH: "/opt/datafed/keys/cert.crt" + DATAFED_WEB_KEY_PATH: "/opt/datafed/keys/cert.key" DATAFED_WEB_USER: "datafed" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_CORE_ADDRESS_PORT_INTERNAL: "datafed-core:7513" @@ -34,10 +31,9 @@ services: DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}" DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}" DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}" - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" - DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}" - DATAFED_WEB_KEY_PATH: "${DATAFED_WEB_KEY_PATH}" + DATAFED_WEB_CERT_PATH: "/opt/datafed/keys/cert.crt" + DATAFED_WEB_KEY_PATH: "/opt/datafed/keys/cert.key" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" DATAFED_DATABASE_IP_ADDRESS: "${DATAFED_DATABASE_IP_ADDRESS}" @@ -59,7 +55,6 @@ services: image: datafed-foxx:latest depends_on: ["arango"] environment: - DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}" DATAFED_DOMAIN: "${DATAFED_DOMAIN}" DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}" DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" @@ -68,6 +63,7 @@ services: # NOTE enabling foxx tests will cause state changes in the database # do not run this unless you are ok with the database being wiped. 
ENABLE_FOXX_TESTS: "${DATAFED_ENABLE_FOXX_TESTS}" + SSL_CERT_FILE: "/usr/local/share/ca-certificates/datafed-arango.crt" user: "${DATAFED_UID}:0" security_opt: - no-new-privileges:true @@ -78,6 +74,7 @@ services: retries: 20 volumes: - foxx_tmp:/tmp + - ./keys/:/usr/local/share/ca-certificates/ networks: - datafed-internal @@ -87,10 +84,18 @@ services: ARANGO_ROOT_PASSWORD: "${DATAFED_DATABASE_PASSWORD}" volumes: - arango_db:/var/lib/arangodb3 + - ./keys/:/usr/local/share/ca-certificates/ ports: - 8529:8529 # Arangodb web UI networks: - datafed-internal + entrypoint: > + sh -c 'if [ -f /usr/local/share/ca-certificates/datafed-arango.pem ]; then + exec /entrypoint.sh --ssl.keyfile /usr/local/share/ca-certificates/datafed-arango.pem \ + --server.endpoint ssl://0.0.0.0:8529; + else + exec /entrypoint.sh --server.endpoint tcp://0.0.0.0:8529; + fi' # Fox tmp is used so that if docker compose up is run a second time it won't # need to overwrite the database it will simply use the existing one hence diff --git a/compose/metadata/generate_env.sh b/compose/metadata/generate_env.sh index 8f0531ea0..bfd4ca0ac 100755 --- a/compose/metadata/generate_env.sh +++ b/compose/metadata/generate_env.sh @@ -3,4 +3,4 @@ SCRIPT=$(realpath "$0") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/../../") -"${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)" -m +"${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)" -m "$@" diff --git a/compose/metadata/generate_globus_files.sh b/compose/metadata/generate_globus_files.sh new file mode 100755 index 000000000..a20bfd451 --- /dev/null +++ b/compose/metadata/generate_globus_files.sh @@ -0,0 +1,8 @@ +#!/bin/bash +SCRIPT=$(realpath "${BASH_SOURCE[0]}") +SOURCE=$(dirname "$SCRIPT") +PROJECT_ROOT=$(realpath "${SOURCE}/../../") + +set -euf -o pipefail + +"${PROJECT_ROOT}/scripts/compose_generate_web_server_globus_credentials.sh" -d "$(pwd)" diff --git a/compose/repo/compose.yml b/compose/repo/compose.yml index 
463012ee3..efdd75eb8 100644 --- a/compose/repo/compose.yml +++ b/compose/repo/compose.yml @@ -47,7 +47,7 @@ services: DATAFED_GLOBUS_CONTROL_PORT: "${DATAFED_GLOBUS_CONTROL_PORT}" DATAFED_GCS_COLLECTION_BASE_PATH: "${DATAFED_GCS_COLLECTION_BASE_PATH}" DATAFED_GCS_COLLECTION_ROOT_PATH: "${DATAFED_GCS_COLLECTION_ROOT_PATH}" - DATAFED_REPO_USER: "${DATAFED_REPO_USER}" + DATAFED_REPO_USER: "datafed" UID: "${DATAFED_UID}" HOST_HOSTNAME: "localhost" DATAFED_AUTHZ_USER: "datafed" diff --git a/compose/repo/generate_globus_files.sh b/compose/repo/generate_globus_files.sh index a2df4cff5..bb37c56d4 100755 --- a/compose/repo/generate_globus_files.sh +++ b/compose/repo/generate_globus_files.sh @@ -1,6 +1,8 @@ #!/bin/bash -SCRIPT=$(realpath "$0") +SCRIPT=$(realpath "${BASH_SOURCE[0]}") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/../../") +set -euf -o pipefail + "${PROJECT_ROOT}/scripts/compose_generate_globus_files.sh" -d "$(pwd)" diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 17bdd6da1..b4ebeb975 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -13,19 +13,30 @@ configure_file( if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_setup COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_setup.sh") add_test(NAME foxx_teardown COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_teardown.sh") - add_test(NAME foxx_authz COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_authz") - add_test(NAME foxx_record COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_record") - add_test(NAME foxx_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_repo") - add_test(NAME foxx_path COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_path") add_test(NAME foxx_db_fixtures COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_fixture_setup.sh") - add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version") - add_test(NAME foxx_support 
COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support") - add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router") - add_test(NAME foxx_authz_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_authz_router") - add_test(NAME foxx_unit_user_token COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_token") - add_test(NAME foxx_unit_user_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_model") - add_test(NAME foxx_unit_globus_collection_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_globus_collection_model") - add_test(NAME foxx_unit_globus_token_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_globus_token_model") + + add_test(NAME foxx_authz COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_authz:") + add_test(NAME foxx_record COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_record:") + add_test(NAME foxx_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_repo:") + add_test(NAME foxx_repo_globus COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_repository_globus:") + add_test(NAME foxx_repo_metadata COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_repository_metadata:") + add_test(NAME foxx_base_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_base_repository:") + add_test(NAME foxx_repositories COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "integration_repositories:") + add_test(NAME foxx_repo_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "integration_repo_router:") + add_test(NAME foxx_validation_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_validation_repository:") + add_test(NAME foxx_path COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_path:") + add_test(NAME foxx_version COMMAND 
"${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") + add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") + add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_version_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version_router:") + add_test(NAME foxx_tag_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_tag_router:") + add_test(NAME foxx_task_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_task_router:") + add_test(NAME foxx_authz_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_authz_router:") + add_test(NAME foxx_query_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_query_router:") + add_test(NAME foxx_unit_user_token COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_token:") + add_test(NAME foxx_unit_user_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_model:") + add_test(NAME foxx_unit_globus_collection_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_globus_collection_model:") + add_test(NAME foxx_unit_globus_token_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_globus_token_model:") set_tests_properties(foxx_setup PROPERTIES FIXTURES_SETUP Foxx) set_tests_properties(foxx_teardown PROPERTIES FIXTURES_CLEANUP Foxx) @@ -36,8 +47,18 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_authz_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_record PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_repo PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_repo_globus PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_repo_metadata PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_base_repo PROPERTIES FIXTURES_REQUIRED Foxx) + 
set_tests_properties(foxx_repositories PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_repo_router PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_version_router PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_tag_router PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_query_router PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_task_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_unit_user_token PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_unit_user_model PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") set_tests_properties(foxx_unit_globus_collection_model PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") diff --git a/core/database/foxx/api/acl_router.js b/core/database/foxx/api/acl_router.js index 9f66b643c..336afbc00 100644 --- a/core/database/foxx/api/acl_router.js +++ b/core/database/foxx/api/acl_router.js @@ -5,6 +5,8 @@ const router = createRouter(); const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); module.exports = router; @@ -37,20 +39,24 @@ router else is_coll = false; if (!is_coll && object._id[0] != "d") - throw [g_lib.ERR_INVALID_PARAM, "Invalid object type, " + object._id]; + throw [error.ERR_INVALID_PARAM, "Invalid object type, " + object._id]; var is_admin = true; - if (!g_lib.hasAdminPermObject(client, object._id)) { + if (!permissions.hasAdminPermObject(client, object._id)) { is_admin = false; - if (!g_lib.hasPermissions(client, object, g_lib.PERM_SHARE)) - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasPermissions(client, object, 
permissions.PERM_SHARE)) + throw error.ERR_PERM_DENIED; } var client_perm, cur_rules; if (!is_admin) { - client_perm = g_lib.getPermissions(client, object, g_lib.PERM_ALL); + client_perm = permissions.getPermissions( + client, + object, + permissions.PERM_ALL, + ); cur_rules = g_db ._query( "for v, e in 1..1 outbound @object acl return { id: v._id, gid: v.gid, grant: e.grant, inhgrant: e.inhgrant }", @@ -77,7 +83,7 @@ router if (!is_coll && rule.inhgrant) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Inherited permissions cannot be applied to data records", ]; @@ -89,13 +95,13 @@ router }); if (!group) - throw [g_lib.ERR_NOT_FOUND, "Group " + rule.id + " not found"]; + throw [error.ERR_NOT_FOUND, "Group " + rule.id + " not found"]; rule.id = group._id; } else { acl_mode |= 1; if (!g_db._exists(rule.id)) - throw [g_lib.ERR_NOT_FOUND, "User " + rule.id + " not found"]; + throw [error.ERR_NOT_FOUND, "User " + rule.id + " not found"]; } if (!is_admin) { @@ -108,7 +114,10 @@ router old_rule = cur_rules[old_rule]; if (old_rule.grant != rule.grant) { chg = old_rule.grant ^ rule.grant; - if ((chg & client_perm) != (chg & ~g_lib.PERM_SHARE)) { + if ( + (chg & client_perm) != + (chg & ~permissions.PERM_SHARE) + ) { console.log( "bad alter", rule.id, @@ -117,7 +126,7 @@ router client_perm, ); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Attempt to alter protected permissions on " + rule.id + " ACL.", @@ -126,7 +135,7 @@ router } } else { if ( - rule.grant & g_lib.PERM_SHARE || + rule.grant & permissions.PERM_SHARE || (rule.grant & client_perm) != rule.grant ) { console.log( @@ -137,7 +146,7 @@ router client_perm, ); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Attempt to exceed controlled permissions on " + rule.id + " ACL.", @@ -199,11 +208,11 @@ router var object = g_lib.getObject(req.queryParams.id, client); if (object._id[0] != "c" && object._id[0] != "d") - throw [g_lib.ERR_INVALID_PARAM, "Invalid object type, " + object._id]; 
+ throw [error.ERR_INVALID_PARAM, "Invalid object type, " + object._id]; - if (!g_lib.hasAdminPermObject(client, object._id)) { - if (!g_lib.hasPermissions(client, object, g_lib.PERM_SHARE)) - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasAdminPermObject(client, object._id)) { + if (!permissions.hasPermissions(client, object, permissions.PERM_SHARE)) + throw error.ERR_PERM_DENIED; } var rules = g_db @@ -259,7 +268,7 @@ router // Verify project exists if (!g_db._exists(owner_id)) - throw [g_lib.ERR_NOT_FOUND, "Project " + owner_id + " not found"]; + throw [error.ERR_NOT_FOUND, "Project " + owner_id + " not found"]; } else { owner_id = g_lib.getUserFromClientID(req.queryParams.owner)._id; } @@ -420,12 +429,12 @@ function dedupShares(client, shares) { for (k = path.path.length - 1; k > 0; k--) { coll = g_db.c.document(path.path[k]); - perm = g_lib.getPermissionsLocal(client._id, coll); - if (perm.inhgrant & g_lib.PERM_LIST) { + perm = permissions.getPermissionsLocal(client._id, coll); + if (perm.inhgrant & permissions.PERM_LIST) { k = 0; break; } - if ((perm.grant & g_lib.PERM_LIST) == 0) break; + if ((perm.grant & permissions.PERM_LIST) == 0) break; } if (k == 0) { @@ -492,11 +501,11 @@ router.get('/by_proj/list', function (req, res) { // Verify owner ID is a project if ( !owner_id.startsWith( "p/" )) - throw [g_lib.ERR_INVALID_PARAM,"Invalid project ID: "+owner_id]; + throw [error.ERR_INVALID_PARAM,"Invalid project ID: "+owner_id]; // Verify owner exists if ( !g_db._exists( owner_id )) - throw [g_lib.ERR_NOT_FOUND,"Project "+owner_id+" not found"]; + throw [error.ERR_NOT_FOUND,"Project "+owner_id+" not found"]; var shares = g_db._query("for v in 1..2 inbound @client member, acl filter v.owner == @owner return {id:v._id,title:v.title,alias:v.alias, doi:v.doi,locked:v.locked}", { client: client._id, owner: owner_id }).toArray(); diff --git a/core/database/foxx/api/admin_router.js b/core/database/foxx/api/admin_router.js index 977a6ad70..9468118f1 100644 --- 
a/core/database/foxx/api/admin_router.js +++ b/core/database/foxx/api/admin_router.js @@ -6,6 +6,7 @@ const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const permissions = require("./lib/permissions"); //const perf = require('@arangodb/foxx'); module.exports = router; @@ -27,7 +28,6 @@ router .get("/test", function (req, res) { try { const client = g_lib.getUserFromClientID(req.queryParams.client); - //var perms = req.queryParams.perms?req.queryParams.perms:g_lib.PERM_ALL; var result = true; var item = g_lib.resolveID(req.queryParams.item, client); var obj = g_db[item[0]].document(item); @@ -35,8 +35,7 @@ router var t1 = new Date(); for (var i = 0; i < 1000; i++) { - result = g_lib.hasPermissions(client, obj, 3); - //result = g_lib.getPermissions( client, obj, 255 ); + result = permissions.hasPermissions(client, obj, 3); } var t2 = new Date(); diff --git a/core/database/foxx/api/authz.js b/core/database/foxx/api/authz.js index 94950d541..8ecbd8227 100644 --- a/core/database/foxx/api/authz.js +++ b/core/database/foxx/api/authz.js @@ -5,6 +5,8 @@ const path = require("path"); const Record = require("./record"); const pathModule = require("./posix_path"); const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const { Repo, PathType } = require("./repo"); module.exports = (function () { @@ -36,26 +38,26 @@ module.exports = (function () { const data_id = "d/" + a_data_key; // If the user is not an admin of the object we will need // to check if the user has the write authorization - if (g_lib.hasAdminPermObject(a_client, data_id)) { + if (permissions.hasAdminPermObject(a_client, data_id)) { return true; } let data = g_db.d.document(data_id); // Grab the data item - if (g_lib.hasPermissions(a_client, data, a_perm)) { + if (permissions.hasPermissions(a_client, data, a_perm)) { return true; } return false; }; obj.readRecord = function (client, 
path) { - const permission = g_lib.PERM_RD_DATA; + const permission = permissions.PERM_RD_DATA; const path_components = pathModule.splitPOSIXPath(path); const data_key = path_components.at(-1); let record = new Record(data_key); if (!record.exists()) { // Return not found error for non-existent records console.log("AUTHZ act: read client: " + client._id + " path " + path + " NOT_FOUND"); - throw [g_lib.ERR_NOT_FOUND, "Record not found: " + path]; + throw [error.ERR_NOT_FOUND, "Record not found: " + path]; } // Special case - allow unknown client to read a publicly accessible record @@ -64,14 +66,14 @@ module.exports = (function () { if (!g_lib.hasPublicRead(record.id())) { console.log("AUTHZ act: read" + " unknown client " + " path " + path + " FAILED"); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Unknown client does not have read permissions on " + path, ]; } } else if (!obj.isRecordActionAuthorized(client, data_key, permission)) { console.log("AUTHZ act: read" + " client: " + client._id + " path " + path + " FAILED"); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Client " + client._id + " does not have read permissions on " + path, ]; } @@ -83,15 +85,15 @@ module.exports = (function () { }; obj.none = function (client, path) { - const permission = g_lib.PERM_NONE; + const permission = permissions.PERM_NONE; }; obj.denied = function (client, path) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; }; obj.createRecord = function (client, path) { - const permission = g_lib.PERM_WR_DATA; + const permission = permissions.PERM_WR_DATA; const path_components = pathModule.splitPOSIXPath(path); const data_key = path_components.at(-1); @@ -100,7 +102,7 @@ module.exports = (function () { "AUTHZ act: create" + " client: " + client._id + " path " + path + " FAILED", ); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Unknown client does not have create permissions on " + path, ]; } else if 
(!obj.isRecordActionAuthorized(client, data_key, permission)) { @@ -108,7 +110,7 @@ module.exports = (function () { "AUTHZ act: create" + " client: " + client._id + " path " + path + " FAILED", ); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Client " + client._id + " does not have create permissions on " + path, ]; } @@ -119,7 +121,7 @@ module.exports = (function () { if (!record.exists()) { // If the record does not exist then the path would not be consistent. console.log("AUTHZ act: create client: " + client._id + " path " + path + " FAILED"); - throw [g_lib.ERR_PERM_DENIED, "Invalid record specified: " + path]; + throw [error.ERR_PERM_DENIED, "Invalid record specified: " + path]; } // This will tell us if the proposed path is consistent with what we expect diff --git a/core/database/foxx/api/authz_router.js b/core/database/foxx/api/authz_router.js index ff160e764..162585499 100644 --- a/core/database/foxx/api/authz_router.js +++ b/core/database/foxx/api/authz_router.js @@ -5,6 +5,8 @@ const router = createRouter(); const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const authzModule = require("./authz"); const { Repo, PathType } = require("./repo"); @@ -48,7 +50,7 @@ router req.queryParams.file + " FAILED", ); - throw [g_lib.ERR_PERM_DENIED, "Unknown client: " + req.queryParams.client]; + throw [error.ERR_PERM_DENIED, "Unknown client: " + req.queryParams.client]; } let repo = new Repo(req.queryParams.repo); let path_type = repo.pathType(req.queryParams.file); @@ -65,7 +67,7 @@ router " FAILED", ); throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Unknown path, or path is not consistent with supported repository folder hierarchy: " + req.queryParams.file, ]; @@ -79,7 +81,7 @@ router req.queryParams.file, ); } else { - throw [g_lib.ERR_INVALID_PARAM, "Invalid gridFTP action: ", 
req.queryParams.act]; + throw [error.ERR_INVALID_PARAM, "Invalid gridFTP action: ", req.queryParams.act]; } console.log( "AUTHZ act: " + @@ -113,37 +115,37 @@ router .get("/perm/check", function (req, res) { try { const client = g_lib.getUserFromClientID(req.queryParams.client); - var perms = req.queryParams.perms ? req.queryParams.perms : g_lib.PERM_ALL; + var perms = req.queryParams.perms ? req.queryParams.perms : permissions.PERM_ALL; var obj, result = true, id = g_lib.resolveID(req.queryParams.id, client), ty = id[0]; if (id[1] != "/") { - throw [g_lib.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; + throw [error.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; } if (ty == "p") { var role = g_lib.getProjectRole(client._id, id); if (role == g_lib.PROJ_NO_ROLE) { // Non members have only VIEW permissions - if (perms != g_lib.PERM_RD_REC) result = false; + if (perms != permissions.PERM_RD_REC) result = false; } else if (role == g_lib.PROJ_MEMBER) { // Non members have only VIEW permissions - if ((perms & ~g_lib.PERM_MEMBER) != 0) result = false; + if ((perms & ~permissions.PERM_MEMBER) != 0) result = false; } else if (role == g_lib.PROJ_MANAGER) { // Managers have all but UPDATE - if ((perms & ~g_lib.PERM_MANAGER) != 0) result = false; + if ((perms & ~permissions.PERM_MANAGER) != 0) result = false; } } else if (ty == "d") { - if (!g_lib.hasAdminPermObject(client, id)) { + if (!permissions.hasAdminPermObject(client, id)) { obj = g_db.d.document(id); if (obj.locked) result = false; - else result = g_lib.hasPermissions(client, obj, perms); + else result = permissions.hasPermissions(client, obj, perms); } } else if (ty == "c") { // If create perm is requested, ensure owner of collection has at least one allocation - if (perms & g_lib.PERM_CREATE) { + if (perms & permissions.PERM_CREATE) { var owner = g_db.owner.firstExample({ _from: id, }); @@ -153,18 +155,18 @@ router }) ) { throw [ - g_lib.ERR_NO_ALLOCATION, + error.ERR_NO_ALLOCATION, "An 
allocation is required to create a collection.", ]; } } - if (!g_lib.hasAdminPermObject(client, id)) { + if (!permissions.hasAdminPermObject(client, id)) { obj = g_db.c.document(id); - result = g_lib.hasPermissions(client, obj, perms); + result = permissions.hasPermissions(client, obj, perms); } } else { - throw [g_lib.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; + throw [error.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; } res.send({ @@ -184,36 +186,36 @@ router .get("/perm/get", function (req, res) { try { const client = g_lib.getUserFromClientID(req.queryParams.client); - var result = req.queryParams.perms ? req.queryParams.perms : g_lib.PERM_ALL; + var result = req.queryParams.perms ? req.queryParams.perms : permissions.PERM_ALL; var obj, id = g_lib.resolveID(req.queryParams.id, client), ty = id[0]; - if (id[1] != "/") throw [g_lib.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; + if (id[1] != "/") throw [error.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; if (ty == "p") { var role = g_lib.getProjectRole(client._id, id); if (role == g_lib.PROJ_NO_ROLE) { // Non members have only VIEW permissions - result &= g_lib.PERM_RD_REC; + result &= permissions.PERM_RD_REC; } else if (role == g_lib.PROJ_MEMBER) { - result &= g_lib.PERM_MEMBER; + result &= permissions.PERM_MEMBER; } else if (role == g_lib.PROJ_MANAGER) { // Managers have all but UPDATE - result &= g_lib.PERM_MANAGER; + result &= permissions.PERM_MANAGER; } } else if (ty == "d") { - if (!g_lib.hasAdminPermObject(client, id)) { + if (!permissions.hasAdminPermObject(client, id)) { obj = g_db.d.document(id); if (obj.locked) result = 0; - else result = g_lib.getPermissions(client, obj, result); + else result = permissions.getPermissions(client, obj, result); } } else if (ty == "c") { - if (!g_lib.hasAdminPermObject(client, id)) { + if (!permissions.hasAdminPermObject(client, id)) { obj = g_db.c.document(id); - result = g_lib.getPermissions(client, obj, result); + 
result = permissions.getPermissions(client, obj, result); } - } else throw [g_lib.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; + } else throw [error.ERR_INVALID_PARAM, "Invalid ID, " + req.queryParams.id]; res.send({ granted: result, diff --git a/core/database/foxx/api/coll_router.js b/core/database/foxx/api/coll_router.js index 98383210f..e3c66b21d 100644 --- a/core/database/foxx/api/coll_router.js +++ b/core/database/foxx/api/coll_router.js @@ -7,6 +7,8 @@ const joi = require("joi"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); module.exports = router; @@ -37,17 +39,17 @@ router _from: parent_id, })._to; if (owner_id != client._id) { - if (!g_lib.hasManagerPermProj(client, owner_id)) { + if (!permissions.hasManagerPermProj(client, owner_id)) { var parent_coll = g_db.c.document(parent_id); if ( - !g_lib.hasPermissions( + !permissions.hasPermissions( client, parent_coll, - g_lib.PERM_CREATE, + permissions.PERM_CREATE, ) ) - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } owner = g_db._document(owner_id); } @@ -62,7 +64,7 @@ router }) ) { throw [ - g_lib.ERR_NO_ALLOCATION, + error.ERR_NO_ALLOCATION, "An allocation is required to create a collection.", ]; } @@ -79,7 +81,7 @@ router .next(); if (count >= owner.max_coll) throw [ - g_lib.ERR_ALLOCATION_EXCEEDED, + error.ERR_ALLOCATION_EXCEEDED, "Collection limit reached (" + owner.max_coll + "). 
Contact system administrator to increase limit.", @@ -232,7 +234,7 @@ router //console.log("coll obj:",obj); - if (!g_lib.hasAdminPermObject(client, coll_id)) { + if (!permissions.hasAdminPermObject(client, coll_id)) { var perms = 0; if ( @@ -240,12 +242,12 @@ router obj.alias !== undefined || obj.desc !== undefined ) - perms |= g_lib.PERM_WR_REC; + perms |= permissions.PERM_WR_REC; - if (obj.topic !== undefined) perms |= g_lib.PERM_SHARE; + if (obj.topic !== undefined) perms |= permissions.PERM_SHARE; - if (!g_lib.hasPermissions(client, coll, perms)) - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasPermissions(client, coll, perms)) + throw error.ERR_PERM_DENIED; } /* Updating topic and tags is complex because topic parts are added as @@ -422,16 +424,16 @@ router admin = false; if (client) { - admin = g_lib.hasAdminPermObject(client, coll_id); + admin = permissions.hasAdminPermObject(client, coll_id); if (!admin) { - if (!g_lib.hasPermissions(client, coll, g_lib.PERM_RD_REC)) { + if (!permissions.hasPermissions(client, coll, permissions.PERM_RD_REC)) { //console.log("perm denied"); - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } } else if (!g_lib.hasPublicRead(coll_id)) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } coll.notes = g_lib.getNoteMask(client, coll, admin); @@ -463,14 +465,14 @@ router admin = false; if (client) { - admin = g_lib.hasAdminPermObject(client, coll_id); + admin = permissions.hasAdminPermObject(client, coll_id); if (!admin) { - if (!g_lib.hasPermissions(client, coll, g_lib.PERM_LIST)) - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasPermissions(client, coll, permissions.PERM_LIST)) + throw error.ERR_PERM_DENIED; } } else if (!g_lib.hasPublicRead(coll_id)) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } var qry = @@ -540,7 +542,7 @@ router if (req.queryParams.add && req.queryParams.remove) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Cannot add and remove 
collection items at the same time.", ]; } @@ -552,13 +554,11 @@ router })._to; var chk_perm = false; - if (!g_lib.hasAdminPermObject(client, coll_id)) { - var req_perm = g_lib.PERM_LINK; - //if ( req.queryParams.remove && req.queryParams.remove.length ) - // req_perm |= g_lib.PERM_SHARE; - if (!g_lib.hasPermissions(client, coll, req_perm, true)) + if (!permissions.hasAdminPermObject(client, coll_id)) { + var req_perm = permissions.PERM_LINK; + if (!permissions.hasPermissions(client, coll, req_perm, true)) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied - requires LINK on collection.", ]; @@ -595,7 +595,7 @@ router }) ) throw [ - g_lib.ERR_UNLINK, + error.ERR_UNLINK, obj._id + " is not in collection " + coll_id, ]; @@ -603,7 +603,7 @@ router // Check if another instance exists in same scope, if not deny permission if (!g_lib.hasAnyCommonAccessScope(obj._id, coll_id)) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Cannot unlink items owned by other users.", ]; } @@ -641,7 +641,7 @@ router //console.log("coll item count:",cres.count()); if (cres.count() + req.queryParams.add.length > g_lib.MAX_COLL_ITEMS) throw [ - g_lib.ERR_INPUT_TOO_LONG, + error.ERR_INPUT_TOO_LONG, "Collection item limit exceeded (" + g_lib.MAX_COLL_ITEMS + " items)", @@ -659,10 +659,10 @@ router _to: obj._id, }) ) - throw [g_lib.ERR_LINK, obj._id + " already linked to " + coll_id]; + throw [error.ERR_LINK, obj._id + " already linked to " + coll_id]; // Check if item is a root collection - if (obj.is_root) throw [g_lib.ERR_LINK, "Cannot link root collection"]; + if (obj.is_root) throw [error.ERR_LINK, "Cannot link root collection"]; // Check if item has same owner as this collection if ( @@ -671,7 +671,7 @@ router })._to != owner_id ) throw [ - g_lib.ERR_LINK, + error.ERR_LINK, obj._id + " and " + coll_id + " have different owners", ]; @@ -679,7 +679,7 @@ router // TODO check if another instance exists in same scope, if not deny if 
(!g_lib.hasAnyCommonAccessScope(obj._id, coll_id)) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Cannot link items from other access-control scopes.", ]; } @@ -689,7 +689,7 @@ router // Check for circular dependency if (obj._id == coll_id || g_lib.isSrcParentOfDest(obj._id, coll_id)) throw [ - g_lib.ERR_LINK, + error.ERR_LINK, "Cannot link ancestor, " + obj._id + ", to descendant, " + @@ -742,7 +742,7 @@ router g_lib.MAX_COLL_ITEMS ) throw [ - g_lib.ERR_INPUT_TOO_LONG, + error.ERR_INPUT_TOO_LONG, "Root collection item limit exceeded (" + g_lib.MAX_COLL_ITEMS + " items)", @@ -827,7 +827,7 @@ router if (src.owner != dst.owner) throw [ - g_lib.ERR_LINK, + error.ERR_LINK, req.queryParams.source + " and " + req.queryParams.dest + @@ -838,27 +838,32 @@ router src_perms = 0, dst_perms = 0; - if (!g_lib.hasAdminPermObject(client, src_id)) { - src_perms = g_lib.getPermissions(client, src, g_lib.PERM_LINK, true); - if ((src_perms & g_lib.PERM_LINK) == 0) + if (!permissions.hasAdminPermObject(client, src_id)) { + src_perms = permissions.getPermissions( + client, + src, + permissions.PERM_LINK, + true, + ); + if ((src_perms & permissions.PERM_LINK) == 0) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied - requires LINK on source collection.", ]; chk_perm = true; } - if (!g_lib.hasAdminPermObject(client, dst_id)) { - dst_perms = g_lib.getPermissions( + if (!permissions.hasAdminPermObject(client, dst_id)) { + dst_perms = permissions.getPermissions( client, dst, - g_lib.PERM_LINK /*| g_lib.PERM_SHARE*/, + permissions.PERM_LINK, true, ); - if ((dst_perms & g_lib.PERM_LINK) == 0) + if ((dst_perms & permissions.PERM_LINK) == 0) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied - requires LINK on destination collection.", ]; @@ -871,12 +876,12 @@ router // TODO - should aliases be resolved with client or owner ID? 
item = g_lib.getObject(req.queryParams.items[i], client); - if (item.is_root) throw [g_lib.ERR_LINK, "Cannot link root collection"]; + if (item.is_root) throw [error.ERR_LINK, "Cannot link root collection"]; if (chk_perm && item.creator != client._id /*&& !has_share*/) { if (!g_lib.hasCommonAccessScope(src_id, dst_id)) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Cannot move items across access-control scopes.", ]; } @@ -888,7 +893,7 @@ router _to: item._id, }) ) - throw [g_lib.ERR_UNLINK, item._id + " is not in collection " + src_id]; + throw [error.ERR_UNLINK, item._id + " is not in collection " + src_id]; if ( g_db.item.firstExample({ @@ -897,7 +902,7 @@ router }) ) throw [ - g_lib.ERR_LINK, + error.ERR_LINK, item._id + " is already in collection " + dst_id, ]; @@ -905,7 +910,7 @@ router // Check for circular dependency if (item._id == dst_id || g_lib.isSrcParentOfDest(item._id, dst_id)) throw [ - g_lib.ERR_LINK, + error.ERR_LINK, "Cannot link ancestor, " + item._id + ", to descendant, " + @@ -940,7 +945,7 @@ router if (cres.count() > g_lib.MAX_COLL_ITEMS) throw [ - g_lib.ERR_INPUT_TOO_LONG, + error.ERR_INPUT_TOO_LONG, "Collection item limit exceeded (" + g_lib.MAX_COLL_ITEMS + " items)", ]; @@ -967,7 +972,7 @@ router var item_id = g_lib.resolveID(req.queryParams.id, client); if (!item_id.startsWith("d/") && !item_id.startsWith("c/")) - throw [g_lib.ERR_INVALID_PARAM, "ID is not a collection or record."]; + throw [error.ERR_INVALID_PARAM, "ID is not a collection or record."]; var results = g_lib.getParents(item_id); if (req.queryParams.inclusive) { @@ -1003,13 +1008,7 @@ router var item_id = g_lib.resolveID(req.queryParams.item, client); if (coll_id.charAt(0) != "c") - throw [g_lib.ERR_INVALID_PARAM, "ID is not a collection."]; - - /*if ( !g_lib.hasAdminPermObject( client, coll_id )) { - var coll = g_db.c.document( coll_id ); - if ( !g_lib.hasPermissions( client, coll, g_lib.PERM_LIST )) - throw g_lib.ERR_PERM_DENIED; - }*/ + throw 
[error.ERR_INVALID_PARAM, "ID is not a collection."]; var qry = "for v in 1..1 outbound @coll item "; if (item_id.charAt(0) == "c") @@ -1029,7 +1028,7 @@ router var idx = ids.indexOf(item_id); if (idx < 0) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Item " + req.queryParams.item + " was not found in collection " + diff --git a/core/database/foxx/api/data_router.js b/core/database/foxx/api/data_router.js index 5ba79ec06..c72f20b05 100644 --- a/core/database/foxx/api/data_router.js +++ b/core/database/foxx/api/data_router.js @@ -5,6 +5,8 @@ const router = createRouter(); const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const g_proc = require("./process"); const g_tasks = require("./tasks"); const { UserToken } = require("./lib/user_token"); @@ -26,10 +28,10 @@ function recordCreate(client, record, result) { _from: parent_id, })._to; if (owner_id != client._id) { - if (!g_lib.hasManagerPermProj(client, owner_id)) { + if (!permissions.hasManagerPermProj(client, owner_id)) { var parent_coll = g_db.c.document(parent_id); - if (!g_lib.hasPermissions(client, parent_coll, g_lib.PERM_CREATE)) { - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasPermissions(client, parent_coll, permissions.PERM_CREATE)) { + throw error.ERR_PERM_DENIED; } } } @@ -49,7 +51,7 @@ function recordCreate(client, record, result) { ); if (cnt_res.next() >= g_lib.MAX_COLL_ITEMS) throw [ - g_lib.ERR_INPUT_TOO_LONG, + error.ERR_INPUT_TOO_LONG, "Parent collection item limit exceeded (" + g_lib.MAX_COLL_ITEMS + " items)", ]; @@ -75,7 +77,7 @@ function recordCreate(client, record, result) { // Verify source path is a full globus path to a file if (obj.source) { if (!g_lib.isFullGlobusPath(obj.source, true, false)) { - throw [g_lib.ERR_INVALID_PARAM, "Source must be a full Globus path to a file."]; + throw [error.ERR_INVALID_PARAM, "Source must be a 
full Globus path to a file."]; } obj.size = 1048576; // Don't know actual size - doesn't really matter @@ -88,7 +90,7 @@ function recordCreate(client, record, result) { repo_alloc = g_lib.assignRepo(owner_id); } - if (!repo_alloc) throw [g_lib.ERR_NO_ALLOCATION, "No allocation available"]; + if (!repo_alloc) throw [error.ERR_NO_ALLOCATION, "No allocation available"]; // Extension setting only apply to managed data if (record.ext) { @@ -102,7 +104,7 @@ function recordCreate(client, record, result) { if (record.md) { obj.md = JSON.parse(record.md); // parse escaped JSON string TODO: this could be dangerous - if (Array.isArray(obj.md)) throw [g_lib.ERR_INVALID_PARAM, "Metadata cannot be an array"]; + if (Array.isArray(obj.md)) throw [error.ERR_INVALID_PARAM, "Metadata cannot be an array"]; } if (obj.alias) { @@ -127,7 +129,7 @@ function recordCreate(client, record, result) { if (obj.sch_id) { var idx = obj.sch_id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } ((sch_id = obj.sch_id.substr(0, idx)), (sch_ver = parseInt(obj.sch_id.substr(idx + 1)))); var sch = g_db.sch.firstExample({ @@ -135,7 +137,7 @@ function recordCreate(client, record, result) { ver: sch_ver, }); - if (!sch) throw [g_lib.ERR_INVALID_PARAM, "Schema '" + obj.sch_id + "' does not exist"]; + if (!sch) throw [error.ERR_INVALID_PARAM, "Schema '" + obj.sch_id + "' does not exist"]; obj.sch_id = sch._id; g_db._update(sch._id, { @@ -174,7 +176,7 @@ function recordCreate(client, record, result) { _key: alias_key, }) ) - throw [g_lib.ERR_INVALID_PARAM, "Alias, " + alias_key + ", already in use"]; + throw [error.ERR_INVALID_PARAM, "Alias, " + alias_key + ", already in use"]; g_db.a.save({ _key: alias_key, @@ -210,7 +212,7 @@ function recordCreate(client, record, result) { }) ) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Only one dependency can be 
defined between any two data records.", ]; g_db.dep.save({ @@ -422,11 +424,11 @@ function recordUpdate(client, record, result) { var data_id = g_lib.resolveDataID(record.id, client); var data = g_db.d.document(data_id); - if (!g_lib.hasAdminPermObject(client, data_id)) { + if (!permissions.hasAdminPermObject(client, data_id)) { // Required permissions depend on which fields are being modified: // Metadata = PERM_WR_META, file_size = PERM_WR_DATA, all else = ADMIN var perms = 0; - if (record.md !== undefined) perms |= g_lib.PERM_WR_META; + if (record.md !== undefined) perms |= permissions.PERM_WR_META; if ( record.title !== undefined || @@ -437,10 +439,11 @@ function recordUpdate(client, record, result) { (record.dep_add && record.dep_add.length) || (record.dep_rem && record.dep_rem.length) ) { - perms |= g_lib.PERM_WR_REC; + perms |= permissions.PERM_WR_REC; } - if (data.locked || !g_lib.hasPermissions(client, data, perms)) throw g_lib.ERR_PERM_DENIED; + if (data.locked || !permissions.hasPermissions(client, data, perms)) + throw error.ERR_PERM_DENIED; } var owner_id = g_db.owner.firstExample({ @@ -465,7 +468,7 @@ function recordUpdate(client, record, result) { } else if (record.md) { obj.md = JSON.parse(record.md); if (Array.isArray(obj.md)) { - throw [g_lib.ERR_INVALID_PARAM, "Metadata cannot be an array"]; + throw [error.ERR_INVALID_PARAM, "Metadata cannot be an array"]; } obj.md_err_msg = null; obj.md_err = false; @@ -491,7 +494,7 @@ function recordUpdate(client, record, result) { var idx = obj.sch_id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = obj.sch_id.substr(0, idx), sch_ver = parseInt(obj.sch_id.substr(idx + 1)); @@ -502,7 +505,7 @@ function recordUpdate(client, record, result) { }); if (!sch) { - throw [g_lib.ERR_INVALID_PARAM, "Schema '" + obj.sch_id + "' does not exist"]; + throw 
[error.ERR_INVALID_PARAM, "Schema '" + obj.sch_id + "' does not exist"]; } obj.sch_id = sch._id; @@ -521,7 +524,7 @@ function recordUpdate(client, record, result) { if (data.external) { if (obj.source) { if (!g_lib.isFullGlobusPath(obj.source, true, false)) { - throw [g_lib.ERR_INVALID_PARAM, "Source must be a full Globus path to a file."]; + throw [error.ERR_INVALID_PARAM, "Source must be a full Globus path to a file."]; } obj.size = 1048576; // Don't know actual size - doesn't really matter @@ -529,7 +532,7 @@ function recordUpdate(client, record, result) { } else { if (obj.source) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Raw data source cannot be specified for managed data records.", ]; } @@ -616,7 +619,7 @@ function recordUpdate(client, record, result) { _key: alias_key, }) ) - throw [g_lib.ERR_INVALID_PARAM, "Alias, " + obj.alias + ", already in use"]; + throw [error.ERR_INVALID_PARAM, "Alias, " + obj.alias + ", already in use"]; g_db.a.save({ _key: alias_key, @@ -633,7 +636,7 @@ function recordUpdate(client, record, result) { } if (record.deps != undefined && (record.deps_add != undefined || record.deps_rem != undefined)) - throw [g_lib.ERR_INVALID_PARAM, "Cannot use both dependency set and add/remove."]; + throw [error.ERR_INVALID_PARAM, "Cannot use both dependency set and add/remove."]; var dep, id, @@ -654,7 +657,7 @@ function recordUpdate(client, record, result) { }); if (!dep) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Specified dependency on " + id + " does not exist.", ]; @@ -676,7 +679,7 @@ function recordUpdate(client, record, result) { dep = record.dep_add[i]; id = g_lib.resolveDataID(dep.id, client); if (!id.startsWith("d/")) - throw [g_lib.ERR_INVALID_PARAM, "Dependencies can only be set on data records."]; + throw [error.ERR_INVALID_PARAM, "Dependencies can only be set on data records."]; if ( g_db.dep.firstExample({ @@ -686,7 +689,7 @@ function recordUpdate(client, record, result) { }) ) throw [ - 
g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Only one dependency of each type may be defined between any two data records.", ]; @@ -980,7 +983,7 @@ router _id: data_id, }) ) - throw [g_lib.ERR_INVALID_PARAM, "Record, " + data_id + ", does not exist."]; + throw [error.ERR_INVALID_PARAM, "Record, " + data_id + ", does not exist."]; // TODO Update schema validation error flag g_db._update( @@ -1105,20 +1108,23 @@ router admin = false; if (client) { - admin = g_lib.hasAdminPermObject(client, data_id); + admin = permissions.hasAdminPermObject(client, data_id); if (!admin) { - var perms = g_lib.getPermissions( + var perms = permissions.getPermissions( client, data, - g_lib.PERM_RD_REC | g_lib.PERM_RD_META, + permissions.PERM_RD_REC | permissions.PERM_RD_META, ); - if (data.locked || (perms & (g_lib.PERM_RD_REC | g_lib.PERM_RD_META)) == 0) - throw g_lib.ERR_PERM_DENIED; - if ((perms & g_lib.PERM_RD_META) == 0) rem_md = true; + if ( + data.locked || + (perms & (permissions.PERM_RD_REC | permissions.PERM_RD_META)) == 0 + ) + throw error.ERR_PERM_DENIED; + if ((perms & permissions.PERM_RD_META) == 0) rem_md = true; } } else if (!g_lib.hasPublicRead(data_id)) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } data.notes = g_lib.getNoteMask(client, data); @@ -1431,9 +1437,9 @@ router for (i in req.queryParams.ids) { obj = g_lib.getObject(req.queryParams.ids[i], client); - if (!g_lib.hasAdminPermObject(client, obj._id)) { - if (!g_lib.hasPermissions(client, obj, g_lib.PERM_LOCK)) - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasAdminPermObject(client, obj._id)) { + if (!permissions.hasPermissions(client, obj, permissions.PERM_LOCK)) + throw error.ERR_PERM_DENIED; } g_db._update( obj._id, @@ -1476,9 +1482,9 @@ router * @param {object} req - The request object, containing the query parameters. * @param {object} res - The response object, used to send the raw data path or error. 
* - * @throws {Error} g_lib.ERR_PERM_DENIED - If the client does not have permission to read the data. - * @throws {Error} g_lib.ERR_NO_RAW_DATA - If the raw data is not found. - * @throws {Error} g_lib.ERR_INVALID_PARAM - If the data belongs to a different domain than specified. + * @throws {Error} error.ERR_PERM_DENIED - If the client does not have permission to read the data. + * @throws {Error} error.ERR_NO_RAW_DATA - If the raw data is not found. + * @throws {Error} error.ERR_INVALID_PARAM - If the data belongs to a different domain than specified. * * @returns {void} - Returns the raw data path in the response if the request is successful. */ @@ -1488,21 +1494,21 @@ router const client = g_lib.getUserFromClientID(req.queryParams.client); var data_id = g_lib.resolveDataID(req.queryParams.id, client); - if (!g_lib.hasAdminPermObject(client, data_id)) { + if (!permissions.hasAdminPermObject(client, data_id)) { var data = g_db.d.document(data_id); - var perms = g_lib.getPermissions(client, data, g_lib.PERM_RD_DATA); - if ((perms & g_lib.PERM_RD_DATA) == 0) throw g_lib.ERR_PERM_DENIED; + var perms = permissions.getPermissions(client, data, permissions.PERM_RD_DATA); + if ((perms & permissions.PERM_RD_DATA) == 0) throw error.ERR_PERM_DENIED; } var loc = g_db.loc.firstExample({ _from: data_id, }); - if (!loc) throw g_lib.ERR_NO_RAW_DATA; + if (!loc) throw error.ERR_NO_RAW_DATA; var repo = g_db.repo.document(loc._to); if (repo.domain != req.queryParams.domain) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Can only access data from '" + repo.domain + "' domain", ]; @@ -1530,9 +1536,9 @@ router if (req.queryParams.subject) { owner_id = req.queryParams.subject; if (req.queryParams.subject.startsWith("u/")) { - g_lib.ensureAdminPermUser(client, owner_id); + permissions.ensureAdminPermUser(client, owner_id); } else { - g_lib.ensureManagerPermProj(client, owner_id); + permissions.ensureManagerPermProj(client, owner_id); } } else { owner_id = client._id; @@ 
-1611,7 +1617,7 @@ router if (!req.body.check && !req.body.path) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Must provide path parameter if not running check.", ]; @@ -1623,7 +1629,7 @@ router }).exists(); if (is_collection && !token_exists) { throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Globus token for mapped collection " + collection_id + " for user " + @@ -1693,13 +1699,13 @@ router if (!req.body.check && !req.body.path) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Must provide path parameter if not running check.", ]; if (req.body.id.length > 1) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Concurrent put of multiple records no supported.", ]; @@ -1711,7 +1717,7 @@ router }).exists(); if (is_collection && !token_exists) { throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Globus token for mapped collection " + collection_id + " for user " + diff --git a/core/database/foxx/api/group_router.js b/core/database/foxx/api/group_router.js index 4d4f53416..f2b0960a6 100644 --- a/core/database/foxx/api/group_router.js +++ b/core/database/foxx/api/group_router.js @@ -3,6 +3,8 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); const joi = require("joi"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); @@ -28,13 +30,13 @@ router if (req.queryParams.proj) { uid = req.queryParams.proj; - g_lib.ensureManagerPermProj(client, uid); + permissions.ensureManagerPermProj(client, uid); } else { uid = client._id; } if (req.queryParams.gid == "members") - throw [g_lib.ERR_PERM_DENIED, "Group ID 'members' is reserved"]; + throw [error.ERR_PERM_DENIED, "Group ID 'members' is reserved"]; var obj = { uid: uid, @@ -50,7 +52,7 @@ router gid: obj.gid, }) ) - throw [g_lib.ERR_IN_USE, "Group ID '" + obj.gid + "' already exists."]; + 
throw [error.ERR_IN_USE, "Group ID '" + obj.gid + "' already exists."]; var group = g_db.g.save(obj, { returnNew: true, @@ -67,7 +69,7 @@ router for (var i in req.queryParams.members) { mem = req.queryParams.members[i]; if (!g_db._exists(mem)) - throw [g_lib.ERR_NOT_FOUND, "User, " + mem + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + mem + ", not found"]; g_db.member.save({ _from: group._id, @@ -122,12 +124,11 @@ router }); if (!group) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found", ]; - //g_lib.ensureAdminPermObject( client, group._id ); - g_lib.ensureManagerPermProj(client, uid); + permissions.ensureManagerPermProj(client, uid); } else { group = g_db.g.firstExample({ uid: client._id, @@ -135,7 +136,7 @@ router }); if (!group) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found", ]; } @@ -161,7 +162,7 @@ router mem = req.queryParams.add[i]; if (!g_db._exists(mem)) - throw [g_lib.ERR_NOT_FOUND, "User, " + mem + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + mem + ", not found"]; if ( !g_db.member.firstExample({ @@ -247,15 +248,14 @@ router }); if (!group) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found", ]; - //g_lib.ensureAdminPermObject( client, group._id ); - g_lib.ensureManagerPermProj(client, uid); + permissions.ensureManagerPermProj(client, uid); // Make sure special members project is protected - if (group.gid == "members") throw g_lib.ERR_PERM_DENIED; + if (group.gid == "members") throw error.ERR_PERM_DENIED; } else { group = g_db.g.firstExample({ uid: client._id, @@ -263,7 +263,7 @@ router }); if (!group) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Group, " + req.queryParams.gid + ", not found", ]; } @@ -290,7 +290,7 @@ router if (req.queryParams.proj) { owner_id = req.queryParams.proj; if (g_lib.getProjectRole(client._id, owner_id) == g_lib.PROJ_NO_ROLE) - 
throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } else { owner_id = client._id; } @@ -327,17 +327,17 @@ router gid: req.queryParams.gid, }); if (!group) - throw [g_lib.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found"]; + throw [error.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found"]; if (g_lib.getProjectRole(client._id, uid) == g_lib.PROJ_NO_ROLE) - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } else { group = g_db.g.firstExample({ uid: client._id, gid: req.queryParams.gid, }); if (!group) - throw [g_lib.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found"]; + throw [error.ERR_NOT_FOUND, "Group ID '" + req.queryParams.gid + "' not found"]; } var result = { diff --git a/core/database/foxx/api/lib/error_codes.js b/core/database/foxx/api/lib/error_codes.js new file mode 100644 index 000000000..a93f22067 --- /dev/null +++ b/core/database/foxx/api/lib/error_codes.js @@ -0,0 +1,42 @@ +"use strict"; + +module.exports = (function () { + var obj = {}; + obj.ERR_INFO = []; + obj.ERR_COUNT = 0; + + obj.ERR_AUTHN_FAILED = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Authentication Failed"]); + obj.ERR_PERM_DENIED = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Permission Denied"]); + obj.ERR_INVALID_PARAM = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Invalid Parameter"]); + obj.ERR_INPUT_TOO_LONG = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Input value too long"]); + obj.ERR_INVALID_CHAR = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Invalid character"]); + obj.ERR_NOT_FOUND = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Record Not Found"]); + obj.ERR_IN_USE = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Value In Use"]); + obj.ERR_LINK = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Collection Link Error"]); + obj.ERR_UNLINK = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Collection Unlink Error"]); + obj.ERR_MISSING_REQ_PARAM = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Missing one or more required 
parameters"]); + obj.ERR_NO_RAW_DATA = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Record has no raw data"]); + obj.ERR_XFR_CONFLICT = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Data transfer conflict"]); + obj.ERR_INTERNAL_FAULT = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Internal server fault"]); + obj.ERR_NO_ALLOCATION = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "No allocation available"]); + obj.ERR_ALLOCATION_EXCEEDED = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Storage allocation exceeded"]); + obj.ERR_INVALID_OPERATION = obj.ERR_COUNT++; + obj.ERR_INFO.push([400, "Invalid operation"]); + + return obj; +})(); diff --git a/core/database/foxx/api/lib/execution_types.js b/core/database/foxx/api/lib/execution_types.js new file mode 100644 index 000000000..db0b33c84 --- /dev/null +++ b/core/database/foxx/api/lib/execution_types.js @@ -0,0 +1,15 @@ +"use strict"; + +/** + * Execution methods + * A enum-like constant representing different execution strategies + * @type {Readonly<{DEFERRED: string, DIRECT: string}>} + */ +const ExecutionMethod = Object.freeze({ + DEFERRED: "deferred", + DIRECT: "direct", +}); + +module.exports = { + ExecutionMethod, +}; diff --git a/core/database/foxx/api/lib/logger.js b/core/database/foxx/api/lib/logger.js new file mode 100644 index 000000000..c410a8040 --- /dev/null +++ b/core/database/foxx/api/lib/logger.js @@ -0,0 +1,114 @@ +"use strict"; +const g_lib = require("../support"); + +function logRequestSuccess({ + client, + correlationId, + httpVerb, + routePath, + status, + description, + extra, +}) { + // helper to pad fields + const pad = (label, value, length = 20) => + `${label}: ${value || "unknown"}`.padEnd(length, " "); + + console.info( + pad("Client", client) + + " | " + + pad("Correlation_ID", correlationId) + + " | " + + pad("HTTP", httpVerb) + + " | " + + pad("Route", routePath) + + " | " + + pad("Status", status) + + " | " + + pad("Desc", description) + + " | " + + pad("Extra", typeof extra === "object" ? 
JSON.stringify(extra) : extra), + ); +} + +function logRequestFailure({ + client, + correlationId, + httpVerb, + routePath, + status, + description, + extra, + error, +}) { + // helper to pad fields + const pad = (label, value, length = 20) => + `${label}: ${value || "unknown"}`.padEnd(length, " "); + //PUT IF STATEMENT + if (g_lib.isInteger(error) || Array.isArray(error)) { + console.error( + pad("Client", client) + + " | " + + pad("Correlation_ID", correlationId) + + " | " + + pad("HTTP", httpVerb) + + " | " + + pad("Route", routePath) + + " | " + + pad("Status", status) + + " | " + + pad("Desc", description) + + " | " + + pad("Extra", typeof extra === "object" ? JSON.stringify(extra) : extra) + + " | " + + pad("Error", error), + ); + } else { + console.error( + pad("Client", client) + + " | " + + pad("Correlation_ID", correlationId) + + " | " + + pad("HTTP", httpVerb) + + " | " + + pad("Route", routePath) + + " | " + + pad("Status", status) + + " | " + + pad("Desc", description) + + " | " + + pad("Extra", typeof extra === "object" ? 
JSON.stringify(extra) : extra) + + " | " + + pad("Error", error.message) + + " | " + + pad("Stack", error.stack), + ); + } +} + +function logRequestStarted({ client, correlationId, httpVerb, routePath, status, description }) { + // helper to pad fields + const pad = (label, value, length = 20) => + `${label}: ${value || "unknown"}`.padEnd(length, " "); + + console.info( + pad("Client", client) + + " | " + + pad("Correlation_ID", correlationId) + + " | " + + pad("HTTP", httpVerb) + + " | " + + pad("Route", routePath) + + " | " + + pad("Status", status) + + " | " + + pad("Desc", description), + ); +} + +// Export the functions +module.exports = { + logRequestSuccess, + logRequestFailure, + logRequestStarted, +}; diff --git a/core/database/foxx/api/lib/permissions.js b/core/database/foxx/api/lib/permissions.js new file mode 100644 index 000000000..285bcf5db --- /dev/null +++ b/core/database/foxx/api/lib/permissions.js @@ -0,0 +1,653 @@ +"use strict"; + +const error = require("./error_codes"); + +module.exports = (function () { + var obj = {}; + + obj.db = require("@arangodb").db; + + obj.PERM_RD_REC = 0x0001; // Read record info (description, keywords, details) + obj.PERM_RD_META = 0x0002; // Read structured metadata + obj.PERM_RD_DATA = 0x0004; // Read raw data + obj.PERM_WR_REC = 0x0008; // Write record info (description, keywords, details) + obj.PERM_WR_META = 0x0010; // Write structured metadata + obj.PERM_WR_DATA = 0x0020; // Write raw data + obj.PERM_LIST = 0x0040; // Find record and view ID, alias, title, and owner + obj.PERM_LINK = 0x0080; // Link/unlink child records (collections only) + obj.PERM_CREATE = 0x0100; // Create new child records (collections only) + obj.PERM_DELETE = 0x0200; // Delete record + obj.PERM_SHARE = 0x0400; // View/set ACLs + obj.PERM_LOCK = 0x0800; // Lock record + obj.PERM_LABEL = 0x1000; // Label record + obj.PERM_TAG = 0x2000; // Tag record + obj.PERM_ANNOTATE = 0x4000; // Annotate record + + obj.PERM_NONE = 0x0000; + 
obj.PERM_RD_ALL = 0x0007; // Read all + obj.PERM_WR_ALL = 0x0038; // Write all + obj.PERM_ALL = 0x7fff; + obj.PERM_MEMBER = 0x0047; // Project record perms + obj.PERM_MANAGER = 0x0407; // Project record perms + obj.PERM_PUBLIC = 0x0047; + + obj.hasAdminPermUser = function (a_client, a_user_id) { + if (a_client._id != a_user_id && !a_client?.is_admin) { + return false; + } else { + return true; + } + }; + + obj.hasAdminPermProj = function (a_client, a_proj_id) { + if ( + !a_client?.is_admin && + !obj.db.owner.firstExample({ + _from: a_proj_id, + _to: a_client._id, + }) + ) { + return false; + } else { + return true; + } + }; + + obj.hasManagerPermProj = function (a_client, a_proj_id) { + if ( + !a_client?.is_admin && + !obj.db.owner.firstExample({ + _from: a_proj_id, + _to: a_client._id, + }) && + !obj.db.admin.firstExample({ + _from: a_proj_id, + _to: a_client._id, + }) + ) { + return false; + } else { + return true; + } + }; + + obj.hasAdminPermObjectLoaded = function (a_client, a_object) { + // TODO Should collection creator have admin rights? + if ( + a_object.owner == a_client._id || + a_object.creator == a_client._id || + a_client?.is_admin + ) + return true; + + if (a_object.owner.charAt(0) == "p") { + if ( + obj.db.owner.firstExample({ + _from: a_object.owner, + _to: a_client._id, + }) + ) + return true; + + if ( + obj.db.admin.firstExample({ + _from: a_object.owner, + _to: a_client._id, + }) + ) + return true; + } + + return false; + }; + + /** + * checks to make sure the client has admin permissions on an object + * + * @param {object} a_client - this is a user document i.e. 
+ * + * + * "_key" : "bob", + * "_id" : "u/bob", + * "name" : "bob junior ", + * "name_first" : "bob", + * "name_last" : "jones", + * "is_admin" : true, + * "max_coll" : 50, + * "max_proj" : 10, + * "max_sav_qry" : 20, + * : + * "email" : "bobjones@gmail.com" + * + * @param {string} a_object_id - the identity of a record or collection or project + * + * "d/fdakjfla" + * "p/big_thing" + * "c/my_collection" + * + * @returns {boolean} - if client has admin rights on the object. + **/ + obj.hasAdminPermObject = function (a_client, a_object_id) { + if (a_client?.is_admin) return true; + + var first_owner = obj.db.owner.firstExample({ + _from: a_object_id, + }); + if (first_owner !== null) { + var owner_id = first_owner._to; // obj.db.owner.firstExample({ _from: a_object_id })._to; + } else { + throw [error.ERR_NOT_FOUND, "Data record for owner not found " + a_object_id + "."]; + } + if (owner_id == a_client._id) return true; + + if (owner_id[0] == "p") { + // Object owned by a project + if ( + obj.db.admin.firstExample({ + _from: owner_id, + _to: a_client._id, + }) + ) + return true; + + if ( + obj.db.owner.firstExample({ + _from: owner_id, + _to: a_client._id, + }) + ) + return true; + } + + if (a_object_id[0] == "d") { + var data = obj.db._query("for i in d filter i._id == @id return i.creator", { + id: a_object_id, + }); + if (!data.hasNext()) { + throw [error.ERR_NOT_FOUND, "Data record " + a_object_id + " not found."]; + } + data = data.next(); + if (a_client._id == data) return true; + } + return false; + }; + + obj.hasAdminPermRepo = function (a_client, a_repo_id) { + if ( + !a_client?.is_admin && + !obj.db.admin.firstExample({ + _from: a_repo_id, + _to: a_client._id, + }) + ) { + return false; + } else { + return true; + } + }; + + obj.ensureAdminPermUser = function (a_client, a_user_id) { + if (!obj.hasAdminPermUser(a_client, a_user_id)) throw error.ERR_PERM_DENIED; + }; + + obj.ensureAdminPermProj = function (a_client, a_user_id) { + if 
(!obj.hasAdminPermProj(a_client, a_user_id)) throw error.ERR_PERM_DENIED; + }; + + obj.ensureManagerPermProj = function (a_client, a_user_id) { + if (!obj.hasManagerPermProj(a_client, a_user_id)) throw error.ERR_PERM_DENIED; + }; + + obj.ensureAdminPermObject = function (a_client, a_object_id) { + if (!obj.hasAdminPermObject(a_client, a_object_id)) throw error.ERR_PERM_DENIED; + }; + + obj.ensureAdminPermRepo = function (a_client, a_repo_id) { + if (!obj.hasAdminPermRepo(a_client, a_repo_id)) + throw [ + error.ERR_PERM_DENIED, + "Client, '" + + a_client?._id + + "', does not have administrative repository permissions on " + + a_repo_id, + ]; + }; + + /* Test if client has requested permission(s) for specified object. Note: this call does NOT check for + * ownership or admin privilege - the hasAdminPermObject function performs these checks and should be + * called first if needed. This function is typically used when filtering a list of objects that are + * known not to be owned by the client (and that the client is not an admin). In this case, those checks + * would add performance cost for no benefit. + */ + obj.hasPermissions = function ( + a_client, + a_object, + a_req_perm, + a_inherited = false, + any = false, + ) { + var perm_found = 0, + acl, + acls, + result, + i; + + // If object is marked "public", everyone is granted VIEW, and READ permissions + // The current implementation allows users to be denied access to public data (maybe wrong?) 
+ + if (a_object.topic) { + perm_found = obj.PERM_PUBLIC; + + result = obj.evalPermissions(a_req_perm, perm_found, any); + if (result != null) return result; + } + + // Evaluate user permissions set directly on object + if (a_object.acls & 1) { + acls = obj.db + ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { + object: a_object._id, + client: a_client._id, + }) + .toArray(); + + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.grant; + if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; + } + + result = obj.evalPermissions(a_req_perm, perm_found, any); + if (result != null) return result; + } + } + + // Evaluate group permissions on object + if (a_object.acls & 2) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter p.vertices[2]._id == @client return p.edges[0]", + { + object: a_object._id, + client: a_client._id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.grant; + if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; + } + + result = obj.evalPermissions(a_req_perm, perm_found, any); + if (result != null) return result; + } + } + + // If not all requested permissions have been found, evaluate permissions inherited from parent collections + // Note that items can only be linked to containers that share the same owner + // This evaluation is implemented as a manually guided breadth-first search + + var children = [a_object]; + var parents, parent; + + for (;;) { + // Find all parent collections owned by object owner + + parents = obj.db + ._query( + "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", + { + children: children, + }, + ) + .toArray(); + + if (parents.length == 0) break; + + for (i in parents) { + parent = parents[i]; + + if (parent.topic) { + perm_found |= obj.PERM_PUBLIC; + + result = obj.evalPermissions(a_req_perm, perm_found, 
any); + if (result != null) return result; + } + + // User ACL first + if (parent.acls && (parent.acls & 1) !== 0) { + acls = obj.db + ._query( + "for v, e in 1..1 outbound @object acl filter v._id == @client return e", + { + object: parent._id, + client: a_client._id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.inhgrant; + } + + result = obj.evalPermissions(a_req_perm, perm_found, any); + if (result != null) return result; + } + } + + // Group ACL next + if (parent.acls && (parent.acls & 2) !== 0) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", + { + object: parent._id, + client: a_client._id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.inhgrant; + } + + result = obj.evalPermissions(a_req_perm, perm_found, any); + if (result != null) return result; + } + } + } + + // If there are still missing require permissions... 
+ // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) + children = parents; + } + + return false; + }; + + obj.evalPermissions = function (a_req_perm, a_perm_found, any) { + if (any) { + // If any requested permission have been found, return true (granted) + if (a_perm_found & a_req_perm) return true; + else return null; // Else, keep looking + } else { + // If not all requested permissions have been found return NULL (keep looking) + if ((a_perm_found & a_req_perm) != a_req_perm) return null; + else return true; // Else, permission granted + } + }; + + obj.getPermissions = function (a_client, a_object, a_req_perm, a_inherited = false) { + var perm_found = 0, + acl, + acls, + i; + + // If object has a topic (collections only), everyone is granted VIEW, and READ permissions + // The current implementation allows users to be denied access to public data (maybe wrong?) + + if (a_object.topic) { + perm_found = obj.PERM_PUBLIC; + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + + // Evaluate permissions set directly on object + + if (a_object.acls && (a_object.acls & 1) !== 0) { + acls = obj.db + ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { + object: a_object._id, + client: a_client._id, + }) + .toArray(); + + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.grant; + if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; + } + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + } + + // Evaluate group permissions on object + + if (a_object.acls && (a_object.acls & 2) !== 0) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter p.vertices[2]._id == @client return p.edges[0]", + { + object: a_object._id, + client: a_client._id, + }, + ) + .toArray(); + + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.grant; + if (a_inherited && 
acl.inhgrant) perm_found |= acl.inhgrant; + } + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + } + + // If not all requested permissions have been found, evaluate permissions inherited from parent collections + // Note that items can only be linked to containers that share the same owner + + var children = [a_object]; + var parents, parent; + + for (;;) { + // Find all parent collections owned by object owner + + parents = obj.db + ._query( + "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", + { + children: children, + }, + ) + .toArray(); + + if (parents.length == 0) break; + + for (i in parents) { + parent = parents[i]; + + if (parent.topic) { + perm_found |= obj.PERM_PUBLIC; + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + + // User ACL + if (parent.acls && (parent.acls & 1) != 0) { + acls = obj.db + ._query( + "for v, e in 1..1 outbound @object acl filter v._id == @client return e", + { + object: parent._id, + client: a_client._id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.inhgrant; + } + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + } + + // Group ACL + if (parent.acls && (parent.acls & 2) != 0) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", + { + object: parent._id, + client: a_client._id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm_found |= acl.inhgrant; + } + + if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; + } + } + } + + // If there are still missing require permissions... 
+ // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) + children = parents; + } + + return perm_found & a_req_perm; + }; + + obj.getPermissionsLocal = function (a_client_id, a_object, a_get_inherited, a_req_perm) { + var perm = { + grant: 0, + inhgrant: 0, + inherited: 0, + }, + acl, + acls, + i; + + if (a_object.topic) { + perm.grant |= obj.PERM_PUBLIC; + perm.inhgrant |= obj.PERM_PUBLIC; + } + + if (a_object.acls & 1) { + acls = obj.db + ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { + object: a_object._id, + client: a_client_id, + }) + .toArray(); + + for (i in acls) { + acl = acls[i]; + perm.grant |= acl.grant; + perm.inhgrant |= acl.inhgrant; + } + } + + // Evaluate group permissions on object + if (a_object.acls & 2) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter p.vertices[2]._id == @client return p.edges[0]", + { + object: a_object._id, + client: a_client_id, + }, + ) + .toArray(); + for (i in acls) { + acl = acls[i]; + perm.grant |= acl.grant; + perm.inhgrant |= acl.inhgrant; + } + } + + if (a_get_inherited) { + var children = [a_object]; + var parents, parent; + + for (;;) { + // Find all parent collections owned by object owner + + parents = obj.db + ._query( + "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", + { + children: children, + }, + ) + .toArray(); + + if (parents.length == 0) break; + + for (i in parents) { + parent = parents[i]; + + if (parent.topic) { + perm.inherited |= obj.PERM_PUBLIC; + + if ((a_req_perm & perm.inherited) == a_req_perm) break; + } + + // User ACL + if (parent.acls && (parent.acls & 1) != 0) { + acls = obj.db + ._query( + "for v, e in 1..1 outbound @object acl filter v._id == @client return e", + { + object: parent._id, + client: a_client_id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + 
perm.inherited |= acl.inhgrant; + } + + if ((a_req_perm & perm.inherited) == a_req_perm) break; + } + } + + // Group ACL + if (parent.acls && (parent.acls & 2) != 0) { + acls = obj.db + ._query( + "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", + { + object: parent._id, + client: a_client_id, + }, + ) + .toArray(); + if (acls.length) { + for (i in acls) { + acl = acls[i]; + perm.inherited |= acl.inhgrant; + } + + if ((a_req_perm & perm.inherited) == a_req_perm) break; + } + } + } + + // If there are still missing require permissions... + // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) + children = parents; + } + } + + return perm; + }; + + return obj; +})(); diff --git a/core/database/foxx/api/lib/result.js b/core/database/foxx/api/lib/result.js new file mode 100644 index 000000000..7139423c9 --- /dev/null +++ b/core/database/foxx/api/lib/result.js @@ -0,0 +1,25 @@ +/** + * Result type for Rust-like error handling + * Rust's Result type is used for recoverable errors + * This pattern makes error handling explicit and composable + */ +const Result = { + ok: (value) => ({ + ok: true, + value, + raiseIfError() { + return this.value; + }, + }), + err: (error) => ({ + ok: false, + error, + raiseIfError() { + throw [this.error.code, this.error.message]; + }, + }), +}; + +module.exports = { + Result, +}; diff --git a/core/database/foxx/api/lib/user_token.js b/core/database/foxx/api/lib/user_token.js index a3f66bfda..71ea0cebe 100644 --- a/core/database/foxx/api/lib/user_token.js +++ b/core/database/foxx/api/lib/user_token.js @@ -1,6 +1,7 @@ "use strict"; const g_lib = require("../support.js"); +const error = require("./error_codes"); const { UserModel } = require("../models/user"); const { GlobusCollectionModel } = require("../models/globus_collection"); const { GlobusTokenModel } = 
require("../models/globus_token"); @@ -28,7 +29,7 @@ class UserToken { const { user_key, user_id, globus_collection_id } = kwargs; this.#user_model = new UserModel(user_id, user_key); if (!this.#user_model.exists()) { - throw [g_lib.ERR_NOT_FOUND, "Specified user does not exist: " + kwargs]; + throw [error.ERR_NOT_FOUND, "Specified user does not exist: " + kwargs]; } this.user = this.#user_model.get(); if (typeof globus_collection_id !== "undefined") { @@ -92,7 +93,7 @@ class UserToken { collection_token = true; } else { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "/token/get Requires 'collection_id' and 'collection_type' both if one is present, received " + "collection_id: " + collection_id + diff --git a/core/database/foxx/api/models/globus_collection.js b/core/database/foxx/api/models/globus_collection.js index 2b3355132..14aa72c51 100644 --- a/core/database/foxx/api/models/globus_collection.js +++ b/core/database/foxx/api/models/globus_collection.js @@ -1,6 +1,7 @@ "use strict"; const support = require("../support"); +const error = require("../lib/error_codes"); const database = require("@arangodb").db; const globus_collection_collection = database.globus_coll; @@ -32,7 +33,7 @@ class GlobusCollectionModel { */ constructor(globus_collection_id) { if (!globus_collection_id) { - throw [support.ERR_MISSING_REQ_PARAM, "A Globus Collection ID must be provided"]; + throw [error.ERR_MISSING_REQ_PARAM, "A Globus Collection ID must be provided"]; } this.#globus_collection_uuid = globus_collection_id; } diff --git a/core/database/foxx/api/models/repositories/base_repository.js b/core/database/foxx/api/models/repositories/base_repository.js new file mode 100644 index 000000000..f11878805 --- /dev/null +++ b/core/database/foxx/api/models/repositories/base_repository.js @@ -0,0 +1,228 @@ +"use strict"; + +const { RepositoryType } = require("./types"); +const g_db = require("@arangodb").db; +const { validateNonEmptyString } = require("./validation"); +const 
error = require("../../lib/error_codes"); +const { Result } = require("../../lib/result"); + +const createRepositoryData = ({ + key, + type, + title, + desc, + capacity, + // Type-specific fields handled through composition + typeSpecific = {}, +}) => ({ + key: key, + id: `repo/${key}`, + type, + title, + desc, + capacity, + ...typeSpecific, +}); + +// WARNING - this will completely replace arrays +function deepMerge(target, source) { + for (const key of Object.keys(source)) { + if (source[key] && typeof source[key] === "object" && !Array.isArray(source[key])) { + if (!target[key] || typeof target[key] !== "object") { + target[key] = {}; + } + deepMerge(target[key], source[key]); + } else { + target[key] = source[key]; + } + } + return target; +} + +class BaseRepository { + constructor(config, typeSpecificConfig) { + if (new.target === BaseRepository) { + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: "BaseRepository cannot be instantiated directly", + }); + } + + let new_repo_data = createRepositoryData({ + key: config.key, + type: config.type, + title: config.title, + desc: config.desc, + capacity: config.capacity, + typeSpecific: typeSpecificConfig, + }); + + let id_defined = false; + if (config.id !== undefined) { + if (config.id.startsWith("repo/") && config.id.length > "repo/".length) { + id_defined = true; + } + } + + let key_defined = false; + if (config.key !== undefined) { + key_defined = true; + } + + if (key_defined && id_defined) { + if (config.id !== `repo/${config.key}`) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: `BaseRepository - provided key ${config.key} is in conflict with id ${config.id}.`, + }); + } + } + + if (key_defined) { + config.id = `repo/${config.key}`; + } else if (id_defined) { + config.key = config.id.slice("repo/".length); + } + + // If we have a key we assume the repo exists + if (config.key != undefined) { + try { + if (g_db._exists(config.id)) { + const existingDoc = 
g_db.repo.document(config.key); + const { _id, _key, _rev, ...temp } = existingDoc; + console.log("existingData found"); + console.log(existingDoc); + this.repoData = { + id: existingDoc._id, + key: existingDoc._key, + ...temp, + }; + this.repoData = deepMerge(this.repoData, new_repo_data); + } else { + this.repoData = new_repo_data; + } + console.log("Repo data after deepMerge"); + console.log(this.repoData); + } catch { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: `BaseRepository - unable to create repository instance ${config.key}.`, + }); + } + } else { + this.repoData = { ...new_repo_data, id: `repo/${config.key}`, key: config.key }; + + console.log("Repo data, now that we know document doesn't exist"); + console.log(this.repoData); + } + + return Result.ok(this); + } + + id() { + return this.repoData.id; + } + + // Validate repository configuration + static validate(config) { + return Result.err({ + code: error.ERR_INVALID_OPERATION, + message: `BaseRepository - unimplemented validation method called.`, + }); + } + + // Create allocation for repository + createAllocation(allocationParams) { + return Result.err({ + code: error.ERR_INVALID_OPERATION, + message: `BaseRepository - unimplemented createAllocation method called.`, + }); + } + + // Delete allocation from repository + deleteAllocation(subjectId) { + return Result.ok(this.repoData.capacity); + } + + // Check if repository supports data operations + supportsDataOperations() { + return Result.err({ + code: error.ERR_INVALID_OPERATION, + message: `BaseRepository - unimplemented supportsDataOperations method called.`, + }); + } + + // Return repository type + type() { + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `BaseRepository - unimplemented type method called.`, + }); + } + + capacity() { + return this.repoData?.capacity; + } + + // Save repository to database + save() { + try { + const { id, key, ...repo_data } = this.repoData; + if (key != undefined) { + if 
(g_db._exists(id)) { + const updated = g_db.repo.update( + { _key: key, ...repo_data }, + { returnNew: true }, + ); + const { _id, _key, _rev, ...updated_repo_data } = updated.new; + this.repoData = { id: _id, key: _key, ...updated_repo_data }; + return Result.ok(updated.new); + } else { + const saved = g_db.repo.save({ _key: key, ...repo_data }, { returnNew: true }); + const { _id, _key, _rev, ...saved_repo_data } = saved.new; + this.repoData = { id: _id, key: _key, ...saved_repo_data }; + return Result.ok(saved.new); + } + } else { + const saved = g_db.repo.save(repo_data, { returnNew: true }); + const { _id, _key, _rev, ...saved_repo_data } = saved.new; + this.repoData = { id: _id, key: _key, ...saved_repo_data }; + return Result.ok(saved.new); + } + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to save repository", + }); + } + } + + // Update repository in database + update(updates) { + try { + // Lazy migration: ensure type field exists when updating + // If the repository doesn't have a type, add it based on current state + if (!this.repoData.type && !updates.type) { + updates.type = this.repoData.type || RepositoryType.GLOBUS; + } + + if (g_db._exists(this.repoData.id)) { + const updated = g_db.repo.update(this.repoData.key, updates, { returnNew: true }); + const { _id, _key, _rev, ...updated_repo_data } = updated.new; + this.repoData = { id: _id, key: _key, ...updated_repo_data }; + return Result.ok(updated.new); + } + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `Failed to update repository, repository document was not found (${this.repoData.id})`, + }); + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to update repository", + }); + } + } +} + +module.exports = { BaseRepository }; diff --git a/core/database/foxx/api/models/repositories/repositories.js b/core/database/foxx/api/models/repositories/repositories.js new file mode 100644 
index 000000000..fd9bd6800 --- /dev/null +++ b/core/database/foxx/api/models/repositories/repositories.js @@ -0,0 +1,130 @@ +"use strict"; + +const { + RepositoryType, + createRepository, + createRepositoryData, + createGlobusConfig, +} = require("./types"); +const { GlobusRepo } = require("./repository/globus"); +const { MetadataRepo } = require("./repository/metadata"); +const error = require("../../lib/error_codes"); +const { Result } = require("../../lib/result"); + +/** + * Create repository based on type + * + * @param {object} config - Repository configuration object + * @param {string} config.id - Repository ID + * @param {string} config.type - Repository type (from RepositoryType enum) + * @param {string} config.title - Repository title + * @param {string} config.desc - Repository description + * @param {number} config.capacity - Storage capacity in bytes + * @param {string} config.endpoint - Globus endpoint (required for GLOBUS type) + * @param {string} config.path - File path (required for GLOBUS type) + * @param {string} config.pub_key - Public key for ZeroMQ CURVE authentication (required for GLOBUS type) + * @param {string} config.address - Network address (required for GLOBUS type) + * @param {string} config.exp_path - Export path (optional for GLOBUS type) + * @returns {{ok: boolean, error: *}|{ok: boolean, value: *}} Result object containing repository or error + */ +class Repositories { + static createRepositoryByType = (config) => { + const missingFields = []; + if (!("type" in config)) missingFields.push("type"); + if (!("title" in config)) missingFields.push("title"); + if (!("capacity" in config)) missingFields.push("capacity"); + + if (missingFields.length > 0) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: `Missing required repository fields: ${missingFields.join(", ")}`, + }); + } + /** + * Type-based creation using switch (Rust match pattern) + */ + switch (config.type) { + case RepositoryType.GLOBUS: { + return new 
GlobusRepo(config); + } + + case RepositoryType.METADATA: { + return new MetadataRepo(config); + } + + default: + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: `Unknown repository type: ${config.type}`, + }); + } + }; + + /** + * Find repository by ID + * This is an associated function (doesn't take self) + * @param {string} repoId - Repository ID (with or without "repo/" prefix) + * @returns {{ok: boolean, error?: *, value?: *}} Result containing repository or error + */ + static find(repoId) { + try { + const key = repoId.startsWith("repo/") ? repoId.slice(5) : repoId; + const repo = g_db.repo.document(key); + + // Default to GLOBUS type if missing (backward compatibility) + // This handles legacy repositories that don't have a type field + repo.type ??= RepositoryType.GLOBUS; + + // Return as tagged union based on type + return Result.ok({ + id: repo._id, // Add id at top level for easy access + type: repo.type, + data: repo, + }); + } catch (e) { + if (e.errorNum === 1202) { + // Document not found + return Result.err({ + code: 404, + message: `Repository not found: ${repoId}`, + }); + } + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to find repository", + }); + } + } + + // List repositories with optional filter + static list(filter = {}) { + try { + let query = "FOR r IN repo"; + const bindVars = {}; + + if (filter.type) { + query += " FILTER r.type == @type"; + bindVars.type = filter.type; + } + + query += " RETURN r"; + + const results = g_db._query(query, bindVars).toArray(); + return Result.ok( + results.map((repo) => ({ + type: repo.type, + data: repo, + })), + ); + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to list repositories", + }); + } + } +} + +module.exports = { + Repositories, +}; diff --git a/core/database/foxx/api/models/repositories/repository/globus.js b/core/database/foxx/api/models/repositories/repository/globus.js new file mode 
100644 index 000000000..8179f7f73 --- /dev/null +++ b/core/database/foxx/api/models/repositories/repository/globus.js @@ -0,0 +1,275 @@ +"use strict"; + +const Joi = require("joi"); +const { RepositoryType, createAllocationResult } = require("../types"); +const { ExecutionMethod } = require("../../../lib/execution_types"); +const { BaseRepository } = require("../base_repository"); +const { + validateCommonFields, + validateAllocationParams, + validateNonEmptyString, + validateRepositoryPath, + validatePOSIXPath, +} = require("../validation"); +const g_tasks = require("../../../tasks"); +const error = require("../../../lib/error_codes"); +const { Result } = require("../../../lib/result"); + +/** + * Globus-specific configuration + * @param {object} config - Globus configuration object + * @param {string} config.endpoint - Globus endpoint identifier + * @param {string} config.path - Repository path on filesystem + * @param {string} config.pub_key - Public key for ZeroMQ CURVE authentication + * @param {string} config.address - Network address + * @param {string} [config.exp_path] - Export path + * @returns {{endpoint: string, path: string, pub_key: string, address: string, exp_path: string }} Globus configuration object + */ +const createGlobusConfig = ({ endpoint, path, pub_key, address, exp_path }) => ({ + endpoint, + path, + pub_key, + address, + exp_path, +}); + +/** + * @module globus + * Globus repository implementation + * Implements repository operations specific to Globus-backed repositories + **/ + +const validatePartialGlobusAllocationParams = (params) => { + if (params.data_limit <= 0) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: + "Allocation data_limit must be a positive number data_limit: " + params.data_limit, + }); + } + return Result.ok(true); +}; + +// Create allocation in Globus repository (async via task) +// +// Expectation +// +// params = { +// "client": { +// "_id": "u/bob", +// "is_admin": false +// }, +// "subject": 
"u/tim", +// "data_limit": 100000, +// "rec_limit": 20000, +// +// } +// +class GlobusRepo extends BaseRepository { + constructor(config) { + const config_result = GlobusRepo.validate(config); + if (config_result.ok == false) { + return config_result; + } + + const globusConfig = createGlobusConfig({ + endpoint: config.endpoint, + path: config.path, + pub_key: config.pub_key, + address: config.address, + exp_path: config.exp_path, + }); + + const result = super(config, globusConfig); + if (result.ok == false) { + return result; + } + this.repoData = result.value.repoData; + + return Result.ok(this.value); + } + + type() { + return RepositoryType.GLOBUS; + } + + createAllocation(params) { + // Validate allocation parameters + const validationResult = validateAllocationParams(params); + if (!validationResult.ok) { + return validationResult; + } + + const validationGlobusResult = validatePartialGlobusAllocationParams(params); + if (!validationGlobusResult.ok) { + return validationGlobusResult; + } + + try { + // Create task for async Globus allocation + // Note: taskInitAllocCreate expects (client, repo_id, subject_id, data_limit, rec_limit) + + // params.client must contain _id, and is_admin members + const taskResult = g_tasks.taskInitAllocCreate( + params.client, + this.repoData.id, + params.subject, + params.size || params.data_limit, // Handle both parameter names + params.rec_limit || 1000000, // Default to 1M records if not specified + ); + + // The taskResult contains { task: taskObject } + // We need to return the task properties that the web service expects + const task = taskResult.task; + + // Return a structure that matches what the original API expects + // The web service needs properties like state, task_id, status, etc. 
+ return Result.ok({ + id: `alloc/${Date.now()}`, // Temporary allocation ID format + repo_id: this.repoData.id, + subject: params.subject, + task_id: task._id, + status: task.status, + state: task.state, // Important: include the state property + queue_time: task.ct || Date.now(), + }); + } catch (e) { + // Handle both Error objects and array-style errors + const errorMessage = e.message || (Array.isArray(e) && e[1]) || String(e); + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `Failed to create allocation task: ${errorMessage}`, + }); + } + } + + // Delete allocation from Globus repository (async via task) + deleteAllocation(client, subject) { + if (!subject || typeof subject !== "string") { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: "Subject ID is required for allocation deletion", + }); + } + + try { + // Create task for async Globus allocation deletion + const task_result = g_tasks.taskInitAllocDelete(client, this.repoData.id, subject); + + return Result.ok(createAllocationResult(ExecutionMethod.DEFERRED, task_result.task)); + } catch (e) { + const errorMessage = e.message || (Array.isArray(e) && e[1]) || String(e); + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `Failed to create allocation task: ${errorMessage}`, + }); + } + } + + static validate(config) { + if (config == null) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: "Unable to validate globus repo config 'null' config provided.", + }); + } + // For partial updates, we don't require all fields + // Only validate the fields that are provided + const errors = []; + + const commonResult = validateCommonFields(config); + if (!commonResult.ok) { + return commonResult; + } + + // Define Joi schema using old-style .error() message customization + const schema = Joi.object() + .keys({ + pub_key: Joi.string() + .min(1) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = 
"Public key must be a string"; + break; + case "string.min": + case "any.empty": + err.message = "Public key cannot be empty"; + break; + } + }); + return errors; + }), + + address: Joi.string() + .min(1) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = "Address must be a string"; + break; + case "string.min": + case "any.empty": + err.message = "Address cannot be empty"; + break; + } + }); + return errors; + }), + + endpoint: Joi.string() + .min(1) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = "Endpoint must be a string"; + break; + case "string.min": + case "any.empty": + err.message = "Endpoint cannot be empty"; + break; + } + }); + return errors; + }), + + path: Joi.string().optional(), + exp_path: Joi.string().optional(), + }) + .unknown(true); // allow extra fields not explicitly validated + + // Validate + const { error: joiError, value } = Joi.validate(config, schema, { + abortEarly: false, // collect all errors + }); + + if (joiError) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: joiError.details.map((d) => d.message).join("; "), + }); + } + + // Perform additional custom validations (that require multiple fields) + if (config.path !== undefined && config.key) { + const pathResult = validateRepositoryPath(config.path, config.key); + if (!pathResult.ok) { + return pathResult; + } + } + + return Result.ok(true); + } + + // Globus repositories support data operations + supportsDataOperations() { + return Result.ok(true); + } +} + +module.exports = { + GlobusRepo, +}; diff --git a/core/database/foxx/api/models/repositories/repository/metadata.js b/core/database/foxx/api/models/repositories/repository/metadata.js new file mode 100644 index 000000000..3dc280b06 --- /dev/null +++ b/core/database/foxx/api/models/repositories/repository/metadata.js @@ -0,0 +1,297 @@ +"use strict"; + +const { createAllocationResult, 
RepositoryType } = require("../types"); +const { ExecutionMethod } = require("../../../lib/execution_types"); +const { BaseRepository } = require("../base_repository.js"); +const { + validateAllocationParams, + validateRepoData, + validateCommonFields, +} = require("../validation"); +const error = require("../../../lib/error_codes"); +const { Result } = require("../../../lib/result"); +const permissions = require("../../../lib/permissions"); +const g_db = require("@arangodb").db; + +/** + * @module metadata + * @description Metadata-only repository implementation + * Implements repository operations for repositories that only store metadata without actual data storage backend + **/ + +// Validate metadata repository (already validated in factory) +class MetadataRepo extends BaseRepository { + constructor(config) { + const config_result = MetadataRepo.validate(config); + if (config_result.ok == false) { + return config_result; + } + + const result = super(config); + if (result.ok == false) { + return result; + } + this.repoData = result.value.repoData; + return Result.ok(this.value); + } + + type() { + return RepositoryType.METADATA; + } + + // Create allocation in metadata repository (direct/synchronous) + // NOTE: We do not need a transaction here, we are assuming the transaction + // declared in the router covers all arango documents and collections used here + createAllocation(params) { + // Validate allocation parameters + const validationResult = validateAllocationParams(params); + if (!validationResult.ok) { + return validationResult; + } + const validationResultRepo = validateRepoData(this.repoData); + if (!validationResultRepo.ok) { + return validationResultRepo; + } + + try { + // For metadata-only repos, allocations are just database records + // No actual storage allocation happens + // + // Unlike the Globus Allocation creation process, we do not need to + // touch the block collection because, we have not created a task. 
+ // blocks documents are needed to track what tasks are blocked + // + // The transaction needs to include the subect document and the repo document + // to avoid the case where the nodes no longer exist. + if (!g_db._exists(this.repoData.id)) { + return Result.err({ + code: error.ERR_NOT_FOUND, + message: + "Failed to create metadata allocation: Repo, '" + + this.repoData.id + + "', does not exist.", + }); + } + if (!g_db._exists(params.subject)) { + return Result.err({ + code: error.ERR_NOT_FOUND, + message: + "Failed to create metadata allocation: Subject, '" + + params.subject + + "', does not exist.", + }); + } + + // Check for proper permissions + try { + permissions.ensureAdminPermRepo(params.client, this.repoData.id); + } catch (e) { + const errorMessage = e.message || (Array.isArray(e) && e[1]) || String(e); + return Result.err({ + code: error.ERR_PERM_DENIED, + message: "Allocation creation failed - " + errorMessage, + }); + } + // Check if there is already a matching allocation + var alloc = g_db.alloc.firstExample({ + _from: params.subject, + _to: this.repoData.id, + }); + if (alloc) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: + "Failed to create metadata allocation: Subject, '" + + params.subject + + "', already has an allocation on " + + this.repoData.id, + }); + } + + const allocation = g_db.alloc.save({ + _from: params.subject, + _to: this.repoData.id, + data_limit: params.data_limit, + rec_limit: params.rec_limit, + rec_count: 0, + data_size: 0, + path: "/", + type: RepositoryType.METADATA, + }); + + // Save to allocations collection (would need to be created) + // For now, return success with the allocation data + const result = { + id: allocation._id, + repo_id: this.repoData.id, + subject: params.subject, + rec_limit: params.rec_limit, + }; + + return Result.ok(createAllocationResult(ExecutionMethod.DIRECT, result)); + } catch (e) { + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `Failed to create 
metadata allocation: ${e.message}`, + }); + } + } + + // Delete allocation from metadata repository (direct/synchronous) + deleteAllocation(client, subject) { + if (!subject || typeof subject !== "string") { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: "Subject ID is required for allocation deletion", + }); + } + + try { + if (!g_db._exists(this.repoData.id)) { + return Result.err({ + code: error.ERR_NOT_FOUND, + message: + "Failed to delete metadata allocation: Repo, '" + + this.repoData.id + + "', does not exist ", + }); + } + + if (!g_db._exists(subject)) { + return Result.err({ + code: error.ERR_NOT_FOUND, + message: + "Failed to delete metadata allocation: Subject, '" + + subject + + "', does not exist ", + }); + } + + var repo = g_db.repo.document(this.repoData.id); + + try { + permissions.ensureAdminPermRepo(client, this.repoData.id); + } catch (e) { + if (e == error.ERR_PERM_DENIED) { + return Result.err({ + code: error.ERR_PERM_DENIED, + message: + "Failed to delete metadata allocation: client, '" + + client._id + + "', does not have permissions to delete an allocation of " + + subject + + " on " + + this.repoData.id, + }); + } + } + + let alloc; + try { + alloc = g_db.alloc.firstExample({ + _from: subject, + _to: this.repoData.id, + }); + } catch { + alloc = null; + } + if (!alloc) { + return Result.err({ + code: error.ERR_NOT_FOUND, + message: + "Failed to delete metadata allocation: Subject, '" + + subject + + "', has no allocation on " + + this.repoData.id, + }); + } + + var count = g_db + ._query( + "return length(for v, e in 1..1 inbound @repo loc filter e.uid == @subj return 1)", + { + repo: this.repoData.id, + subj: subject, + }, + ) + .next(); + if (count) { + return Result.err({ + code: error.ERR_IN_USE, + message: + "Failed to delete metadata allocation: " + + count + + " records found on the allocaition ", + }); + } + + g_db.alloc.removeByExample({ + _from: subject, + _to: this.repoData.id, + }); + // For metadata-only 
repos, just remove the database record + // No actual storage deallocation needed + const result = { + repo_id: this.repoData.id, + subject: subject, + status: "completed", + message: "Metadata allocation removed", + }; + + return Result.ok(createAllocationResult(ExecutionMethod.DIRECT, result)); + } catch (e) { + return Result.err({ + code: error.ERR_INTERNAL_FAULT, + message: `Failed to delete metadata location: ${e.message}`, + }); + } + } + + // Normalize admin/admins field for backward compatibility + static validate(config) { + if (config == null) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: "Unable to validate metadata repo config 'null' config provided.", + }); + } + + const commonResult = validateCommonFields(config); + if (!commonResult.ok) { + return commonResult; + } + + const errors = []; + if (config.capacity != 0) { + errors.push("Metadata repository capacity must be 0: capacity=" + config.capacity); + } + // Metadata repositories don't need Globus-specific fields + // But should not have them either + const invalidFields = ["pub_key", "address", "endpoint", "path", "exp_path"]; + const presentInvalidFields = invalidFields.filter((field) => config[field] !== undefined); + + if (presentInvalidFields.length > 0) { + errors.push( + `Metadata-only repositories should not have: ${presentInvalidFields.join(", ")}`, + ); + } + + if (errors.length > 0) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: errors.join("; "), + }); + } + + return Result.ok(true); + } + + // Metadata repositories do NOT support data operations + supportsDataOperations() { + return Result.ok(false); + } +} + +module.exports = { + MetadataRepo, +}; diff --git a/core/database/foxx/api/models/repositories/types.js b/core/database/foxx/api/models/repositories/types.js new file mode 100644 index 000000000..663863e46 --- /dev/null +++ b/core/database/foxx/api/models/repositories/types.js @@ -0,0 +1,39 @@ +"use strict"; + +const { ExecutionMethod } 
= require("../../lib/execution_types"); + +const RepositoryType = Object.freeze({ + GLOBUS: "globus", + METADATA: "metadata", +}); + +/** + * Tagged union for repositories (type + data) + * Rust enums can contain data, creating tagged unions (also called algebraic data types) + * This pattern enables type-safe polymorphism without inheritance + * @param {string} type - Repository type (from RepositoryType enum) + * @param {object} data - Repository data object + * @returns {{type: string, data: object}} Tagged union with type and data fields + * @see https://doc.rust-lang.org/book/ch06-01-defining-an-enum.html#enum-values + */ +const createRepository = (type, data) => ({ + type, + data, +}); + +/** + * Allocation result structure + * @param {string} method - Execution method (DEFERRED or DIRECT) + * @param {object} payload - Result payload (task info or direct result) + * @returns {{execution_method: string, task?: object, result?: object}} Allocation result with execution method and appropriate payload + */ +const createAllocationResult = (method, payload) => ({ + execution_method: method, + ...(method === ExecutionMethod.DEFERRED ? { task: payload } : { result: payload }), +}); + +module.exports = { + RepositoryType, + createRepository, + createAllocationResult, +}; diff --git a/core/database/foxx/api/models/repositories/validation.js b/core/database/foxx/api/models/repositories/validation.js new file mode 100644 index 000000000..e0cbde626 --- /dev/null +++ b/core/database/foxx/api/models/repositories/validation.js @@ -0,0 +1,231 @@ +"use strict"; + +const Joi = require("joi"); +const { Result } = require("../../lib/result"); +const error = require("../../lib/error_codes"); + +// Define error code constant if not available from g_lib +const ERR_INVALID_PARAM = error.ERR_INVALID_PARAM !== undefined ? error.ERR_INVALID_PARAM : 2; +const ERR_INVALID_OPERATION = + error.ERR_INVALID_OPERATION !== undefined ? 
error.ERR_INVALID_OPERATION : 400; + +/** + * Pure functions that return Result types for error handling + */ + +// Validate that a value is a non-empty string +// Reusable helper following DRY principle +const validateNonEmptyString = (value, fieldName) => { + if (!value || typeof value !== "string" || value.trim() === "") { + return Result.err({ + code: ERR_INVALID_PARAM, + message: `${fieldName} is required and must be a non-empty string`, + }); + } + return Result.ok(true); +}; + +// Validate common repository fields +// Pure function - no side effects, deterministic output +const validateCommonFields = (config) => { + const schema = Joi.object() + .keys({ + id: Joi.string() + .min(1) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = "Repository ID must be a non-empty string"; + break; + } + }); + return errors; + }), + + title: Joi.string() + .min(1) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = "Repository title must be a string"; + break; + case "string.min": + case "any.empty": + err.message = "Repository title cannot be empty"; + break; + } + }); + return errors; + }), + + key: Joi.string() + .max(40) + .lowercase() + .regex(/^[a-z0-9_.-]+$/) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "string.base": + err.message = "Repository key must be a string"; + break; + case "string.empty": + err.message = "Repository key is empty"; + break; + case "string.max": + err.message = "Repository key cannot be longer than 40 characters"; + break; + case "string.pattern.base": + err.message = + "Repository key may only contain lowercase letters, numbers, underscores, hyphens and periods."; + break; + } + }); + return errors; + }), + + capacity: Joi.number() + .min(0) + .error((errors) => { + errors.forEach((err) => { + switch (err.type) { + case "number.base": + err.message = "Repository capacity must be a 
number"; + break; + case "number.min": + err.message = "Repository capacity cannot be negative"; + break; + } + }); + return errors; + }), + + // NOTE(review): removed duplicate 'key' property — in a JS object literal the later duplicate overrides the stricter 'key' schema defined above + }) + .unknown(true); // allow extra fields not explicitly validated + + const { error: joiError, value } = Joi.validate(config, schema, { + abortEarly: false, // collect all errors + }); + + if (joiError) { + return Result.err({ + code: error.ERR_INVALID_PARAM, + message: joiError.details.map((d) => d.message).join("; "), + }); + } + + return Result.ok(true); +}; + +// Validate POSIX path format +const validatePOSIXPath = (path, fieldName) => { + if (!path || typeof path !== "string") { + return Result.err({ + code: ERR_INVALID_PARAM, + message: `${fieldName} must be a non-empty string`, + }); + } + + if (!path.startsWith("/")) { + return Result.err({ + code: ERR_INVALID_PARAM, + message: `${fieldName} must be an absolute path (start with '/')`, + }); + } + + // Check for invalid characters in path + if (path.includes("..") || path.includes("//")) { + return Result.err({ + code: ERR_INVALID_PARAM, + message: `${fieldName} contains invalid path sequences`, + }); + } + + return Result.ok(true); +}; + +// Validate repository path ends with ID +const validateRepositoryPath = (path, repoId) => { + const pathResult = validatePOSIXPath(path, "Repository path"); + if (!pathResult.ok) { + return pathResult; + } + + // Ensure path ends with / + const normalizedPath = path.endsWith("/") ?
path : path + "/"; + + // Extract last component + const idx = normalizedPath.lastIndexOf("/", normalizedPath.length - 2); + const lastComponent = normalizedPath.slice(idx + 1, normalizedPath.length - 1); + + if (lastComponent !== repoId) { + return Result.err({ + code: ERR_INVALID_PARAM, + message: `Repository path must end with repository ID (${repoId})`, + }); + } + + return Result.ok(true); +}; + +const validateRepoData = (repoData) => { + if (typeof repoData === "undefined") { + return Result.err({ + code: ERR_INVALID_PARAM, + message: "Repo data is undefined.", + }); + } + return Result.ok(true); +}; + +// Validate allocation parameters +const validateAllocationParams = (params) => { + const errors = []; + + const subjectValidation = validateNonEmptyString(params.subject, "Allocation subject"); + if (!subjectValidation.ok) { + errors.push(subjectValidation.error.message); + } + + if (typeof params.data_limit !== "number") { + errors.push( + "Allocation data_limit must be a number, type: " + + typeof params.data_limit + + " data_limit: " + + params.data_limit, + ); + } + + if (typeof params.rec_limit !== "number") { + errors.push( + "Allocation rec_limit must be a number, type: " + + typeof params.rec_limit + + " rec_limit: " + + params.rec_limit, + ); + } + + if (params.path && typeof params.path !== "string") { + errors.push("Allocation path must be a string if provided"); + } + + if (errors.length > 0) { + return Result.err({ + code: ERR_INVALID_PARAM, + message: errors.join("; "), + }); + } + + return Result.ok(true); +}; + +module.exports = { + validateNonEmptyString, + validateCommonFields, + validatePOSIXPath, + validateRepositoryPath, + validateAllocationParams, + validateRepoData, +}; diff --git a/core/database/foxx/api/note_router.js b/core/database/foxx/api/note_router.js index 210bce368..445ed28d1 100644 --- a/core/database/foxx/api/note_router.js +++ b/core/database/foxx/api/note_router.js @@ -5,6 +5,8 @@ const router = createRouter(); const joi = 
require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); module.exports = router; @@ -24,17 +26,17 @@ router var id = g_lib.resolveDataCollID(req.queryParams.subject, client), doc = g_db._document(id); - if (!g_lib.hasAdminPermObject(client, id)) { + if (!permissions.hasAdminPermObject(client, id)) { if ( - (g_lib.getPermissions(client, doc, g_lib.PERM_RD_REC) & - g_lib.PERM_RD_REC) == + (permissions.getPermissions(client, doc, permissions.PERM_RD_REC) & + permissions.PERM_RD_REC) == 0 ) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } if (req.queryParams.activate) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Only owner or admin may create a new annotaion in active state.", ]; } @@ -118,13 +120,13 @@ router if (!req.queryParams.id.startsWith("n/")) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid annotaion ID '" + req.queryParams.id + "'", ]; if (!g_db._exists(req.queryParams.id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Annotaion ID '" + req.queryParams.id + "' does not exist.", ]; @@ -144,14 +146,14 @@ router */ if (req.queryParams.new_state === note.state) { - throw [g_lib.ERR_INVALID_PARAM, "Invalid new state for annotaion."]; + throw [error.ERR_INVALID_PARAM, "Invalid new state for annotaion."]; } // Subject admins can do anything // Creators cannot edit if state is active // Others can not update - if (!g_lib.hasAdminPermObject(client, ne._from)) { + if (!permissions.hasAdminPermObject(client, ne._from)) { if (client._id == note.creator) { if ( (note.state == g_lib.NOTE_ACTIVE && @@ -160,10 +162,10 @@ router req.queryParams.title != undefined)) || req.queryParams.new_state == g_lib.NOTE_ACTIVE ) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } else { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } @@ -262,20 +264,20 @@ 
router if (!req.queryParams.id.startsWith("n/")) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid annotaion ID '" + req.queryParams.id + "'", ]; if (!g_db._exists(req.queryParams.id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Annotaion ID '" + req.queryParams.id + "' does not exist.", ]; var note = g_db.n.document(req.queryParams.id); if (req.queryParams.comment_idx >= note.comments.length) - throw [g_lib.ERR_INVALID_PARAM, "Comment index out of range."]; + throw [error.ERR_INVALID_PARAM, "Comment index out of range."]; var obj = { ut: Math.floor(Date.now() / 1000), @@ -283,7 +285,7 @@ router comment = note.comments[req.queryParams.comment_idx]; if (client._id != comment.user) { - throw [g_lib.ERR_PERM_DENIED, "Only original commentor may edit comments."]; + throw [error.ERR_PERM_DENIED, "Only original commentor may edit comments."]; } if (req.queryParams.comment != comment.comment) { @@ -318,13 +320,13 @@ router if (!req.queryParams.id.startsWith("n/")) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid annotaion ID '" + req.queryParams.id + "'", ]; if (!g_db._exists(req.queryParams.id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Annotaion ID '" + req.queryParams.id + "' does not exist.", ]; @@ -334,23 +336,23 @@ router var ne = g_db.note.firstExample({ _to: note._id, }); - if (!client || !g_lib.hasAdminPermObject(client, ne._from)) { + if (!client || !permissions.hasAdminPermObject(client, ne._from)) { if (note.state == g_lib.NOTE_ACTIVE) { // Anyone with read permission to subject doc can comment on active notes var doc = g_db._document(ne._from); if (!client) { if (!g_lib.hasPublicRead(doc._id)) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } else if ( - (g_lib.getPermissions(client, doc, g_lib.PERM_RD_REC) & - g_lib.PERM_RD_REC) == + (permissions.getPermissions(client, doc, permissions.PERM_RD_REC) & + permissions.PERM_RD_REC) == 0 ) { - throw 
g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } else { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } } @@ -391,7 +393,7 @@ router results = g_db._query(qry, { subj: id, }); - } else if (g_lib.hasAdminPermObject(client, id)) { + } else if (permissions.hasAdminPermObject(client, id)) { qry = "for v in 1..1 outbound @subj note sort v.ut desc return {_id:v._id,state:v.state,type:v.type,subject_id:v.subject_id,title:v.title,creator:v.creator,parent_id:v.parent_id,ct:v.ct,ut:v.ut}"; results = g_db._query(qry, { diff --git a/core/database/foxx/api/process.js b/core/database/foxx/api/process.js index c989f0a57..9eaa4eff1 100644 --- a/core/database/foxx/api/process.js +++ b/core/database/foxx/api/process.js @@ -2,6 +2,8 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); module.exports = (function () { var obj = {}; @@ -41,7 +43,7 @@ module.exports = (function () { * - `ext_data`: A list of external data records. * - `visited`: A record of visited items during recursion. * - * @throws {Error} g_lib.ERR_INVALID_MODE - If an invalid mode is passed. + * @throws {Error} error.ERR_INVALID_MODE - If an invalid mode is passed. * * @example * const result = obj.preprocessItems(client, newOwnerId, dataIds, g_lib.TT_DATA_GET); @@ -67,31 +69,31 @@ module.exports = (function () { switch (a_mode) { case g_lib.TT_DATA_GET: - ctxt.data_perm = g_lib.PERM_RD_DATA; - ctxt.coll_perm = g_lib.PERM_LIST; + ctxt.data_perm = permissions.PERM_RD_DATA; + ctxt.coll_perm = permissions.PERM_LIST; break; case g_lib.TT_DATA_PUT: - ctxt.data_perm = g_lib.PERM_WR_DATA; + ctxt.data_perm = permissions.PERM_WR_DATA; // Collections not allowed break; case g_lib.TT_REC_ALLOC_CHG: // Must be data owner OR if owned by a project, the project or // an admin, or the creator. 
- ctxt.coll_perm = g_lib.PERM_LIST; + ctxt.coll_perm = permissions.PERM_LIST; break; case g_lib.TT_REC_OWNER_CHG: // Must have all read+delete, or be owner or creator OR, if owned by a project, the project or // an admin. - ctxt.data_perm = g_lib.PERM_RD_ALL | g_lib.PERM_DELETE; - ctxt.coll_perm = g_lib.PERM_LIST; + ctxt.data_perm = permissions.PERM_RD_ALL | permissions.PERM_DELETE; + ctxt.coll_perm = permissions.PERM_LIST; break; case g_lib.TT_REC_DEL: - ctxt.data_perm = g_lib.PERM_DELETE; - ctxt.coll_perm = g_lib.PERM_DELETE; + ctxt.data_perm = permissions.PERM_DELETE; + ctxt.coll_perm = permissions.PERM_DELETE; break; case g_lib.TT_DATA_EXPORT: - ctxt.data_perm = g_lib.PERM_RD_REC | g_lib.PERM_RD_META; - ctxt.coll_perm = g_lib.PERM_LIST; + ctxt.data_perm = permissions.PERM_RD_REC | permissions.PERM_RD_META; + ctxt.coll_perm = permissions.PERM_LIST; break; } @@ -170,7 +172,7 @@ module.exports = (function () { if (id.charAt(0) == "c") { if (a_ctxt.mode == g_lib.TT_DATA_PUT) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Collections not supported for PUT operations.", ]; is_coll = true; @@ -195,7 +197,7 @@ module.exports = (function () { if (!g_db._exists(id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, (is_coll ? 
"Collection '" : "Data record '") + id + "' does not exist.", ]; @@ -211,7 +213,7 @@ module.exports = (function () { // Make sure user isn't trying to delete root if (doc.is_root && a_ctxt.mode == g_lib.TT_REC_DEL) - throw [g_lib.ERR_PERM_DENIED, "Cannot delete root collection " + id]; + throw [error.ERR_PERM_DENIED, "Cannot delete root collection " + id]; /* If either collection OR data permission are not satisfied, will need to evaluate grant and inherited collection @@ -222,12 +224,12 @@ module.exports = (function () { (coll_perm & a_ctxt.coll_perm) != a_ctxt.coll_perm || (data_perm & a_ctxt.data_perm) != a_ctxt.data_perm ) { - if (!g_lib.hasAdminPermObjectLoaded(a_ctxt.client, doc)) { + if (!permissions.hasAdminPermObjectLoaded(a_ctxt.client, doc)) { if (a_coll_perm != null) // Already have inherited permission, don't ask again - perm = g_lib.getPermissionsLocal(a_ctxt.client._id, doc); + perm = permissions.getPermissionsLocal(a_ctxt.client._id, doc); else - perm = g_lib.getPermissionsLocal( + perm = permissions.getPermissionsLocal( a_ctxt.client._id, doc, true, @@ -244,7 +246,7 @@ module.exports = (function () { ((perm.grant | perm.inherited) & a_ctxt.coll_perm) != a_ctxt.coll_perm ) { - throw [g_lib.ERR_PERM_DENIED, "Permission denied for collection " + id]; + throw [error.ERR_PERM_DENIED, "Permission denied for collection " + id]; } // inherited and inhgrant perms only apply to recursion @@ -271,19 +273,19 @@ module.exports = (function () { if (doc.owner != a_ctxt.client._id) { if (doc.owner.startsWith("p/")) { if (!(doc.owner in a_ctxt.visited)) { - if (g_lib.hasManagerPermProj(a_ctxt.client, doc.owner)) { + if (permissions.hasManagerPermProj(a_ctxt.client, doc.owner)) { // Put project ID in visited to avoid checking permissions again a_ctxt.visited[doc.owner] = 1; } else { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied for data record " + id, ]; } } } else { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission 
denied for data record " + id, ]; } @@ -300,7 +302,7 @@ module.exports = (function () { if (doc.owner.startsWith("p/")) { if (!(doc.owner in a_ctxt.visited)) { - if (g_lib.hasManagerPermProj(a_ctxt.client, doc.owner)) { + if (permissions.hasManagerPermProj(a_ctxt.client, doc.owner)) { // Put project ID in visited to avoid checking permissions again a_ctxt.visited[doc.owner] = 1; ok = true; @@ -313,9 +315,9 @@ module.exports = (function () { if (!ok && (a_data_perm & a_ctxt.data_perm) != a_ctxt.data_perm) { if (a_data_perm != null) // Already have inherited permission, don't ask again - perm = g_lib.getPermissionsLocal(a_ctxt.client._id, doc); + perm = permissions.getPermissionsLocal(a_ctxt.client._id, doc); else - perm = g_lib.getPermissionsLocal( + perm = permissions.getPermissionsLocal( a_ctxt.client._id, doc, true, @@ -327,19 +329,19 @@ module.exports = (function () { a_ctxt.data_perm ) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied for data record " + id, ]; } } } else { if ((a_data_perm & a_ctxt.data_perm) != a_ctxt.data_perm) { - if (!g_lib.hasAdminPermObjectLoaded(a_ctxt.client, doc)) { + if (!permissions.hasAdminPermObjectLoaded(a_ctxt.client, doc)) { if (a_data_perm != null) // Already have inherited permission, don't ask again - perm = g_lib.getPermissionsLocal(a_ctxt.client._id, doc); + perm = permissions.getPermissionsLocal(a_ctxt.client._id, doc); else - perm = g_lib.getPermissionsLocal( + perm = permissions.getPermissionsLocal( a_ctxt.client._id, doc, true, @@ -351,7 +353,7 @@ module.exports = (function () { a_ctxt.data_perm ) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied for data record " + id, ]; } @@ -362,7 +364,7 @@ module.exports = (function () { if (doc.external) { if (a_ctxt.mode == g_lib.TT_DATA_PUT) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Cannot upload to external data on record '" + doc.id + "'.", ]; diff --git a/core/database/foxx/api/proj_router.js 
b/core/database/foxx/api/proj_router.js index 0dfb1674b..a237b561f 100644 --- a/core/database/foxx/api/proj_router.js +++ b/core/database/foxx/api/proj_router.js @@ -6,6 +6,8 @@ const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const g_tasks = require("./tasks"); module.exports = router; @@ -47,7 +49,7 @@ router .toArray(); if (repos.length == 0) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Projects can only be created by repository administrators.", ]; @@ -63,7 +65,7 @@ router .next(); if (count >= client.max_proj) throw [ - g_lib.ERR_ALLOCATION_EXCEEDED, + error.ERR_ALLOCATION_EXCEEDED, "Project limit reached (" + client.max_proj + "). Contact system administrator to increase limit.", @@ -149,8 +151,8 @@ router g_db.acl.save({ _from: root._id, _to: mem_grp._id, - grant: g_lib.PERM_MEMBER, - inhgrant: g_lib.PERM_MEMBER, + grant: permissions.PERM_MEMBER, + inhgrant: permissions.PERM_MEMBER, }); proj.new.admins = []; @@ -162,7 +164,7 @@ router uid = req.queryParams.admins[i]; if (uid == client._id) continue; if (!g_db._exists(uid)) - throw [g_lib.ERR_NOT_FOUND, "User, " + uid + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + uid + ", not found"]; g_db.admin.save({ _from: proj._id, @@ -177,7 +179,7 @@ router uid = req.queryParams.members[i]; if (uid == client._id || proj.new.admins.indexOf(uid) != -1) continue; if (!g_db._exists(uid)) - throw [g_lib.ERR_NOT_FOUND, "User, " + uid + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + uid + ", not found"]; g_db.member.save({ _from: mem_grp._id, @@ -229,13 +231,13 @@ router var proj_id = req.queryParams.id; if (!g_db.p.exists(proj_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such project '" + proj_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such project '" + proj_id + "'"]; var is_admin = true; - if (!g_lib.hasAdminPermProj(client, proj_id)) { - 
if (!g_lib.hasManagerPermProj(client, proj_id)) { - throw g_lib.ERR_PERM_DENIED; + if (!permissions.hasAdminPermProj(client, proj_id)) { + if (!permissions.hasManagerPermProj(client, proj_id)) { + throw error.ERR_PERM_DENIED; } is_admin = false; } @@ -258,7 +260,7 @@ router obj.desc != undefined || req.queryParams.admins != undefined ) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } @@ -280,7 +282,7 @@ router uid = req.queryParams.admins[i]; if (uid == owner_id) continue; if (!g_db._exists(uid)) - throw [g_lib.ERR_NOT_FOUND, "User, " + uid + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + uid + ", not found"]; g_db.admin.save({ _from: proj_id, @@ -333,7 +335,7 @@ router uid = req.queryParams.members[i]; if (uid == owner_id || proj.new.admins.indexOf(uid) != -1) continue; if (!g_db._exists(uid)) - throw [g_lib.ERR_NOT_FOUND, "User, " + uid + ", not found"]; + throw [error.ERR_NOT_FOUND, "User, " + uid + ", not found"]; g_db.member.save({ _from: mem_grp._id, @@ -390,7 +392,7 @@ router const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); if (!g_db.p.exists(req.queryParams.id)) - throw [g_lib.ERR_INVALID_PARAM, "No such project '" + req.queryParams.id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such project '" + req.queryParams.id + "'"]; var proj = g_db.p.document({ _id: req.queryParams.id, @@ -525,7 +527,7 @@ router var user_id; if (req.queryParams.subject) { - g_lib.ensureAdminPermUser(client, req.queryParams.subject); + permissions.ensureAdminPermUser(client, req.queryParams.subject); } else user_id = client._id; if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { @@ -646,10 +648,10 @@ router else subj = client._id; if (!req.queryParams.id.startsWith("p/")) - throw [g_lib.ERR_INVALID_PARAM, "Invalid project ID: " + req.queryParams.id]; + throw [error.ERR_INVALID_PARAM, "Invalid project ID: " + req.queryParams.id]; if (!g_db._exists(req.queryParams.id)) - throw [g_lib.ERR_NOT_FOUND, 
"Project, " + req.queryParams.id + ", not found"]; + throw [error.ERR_NOT_FOUND, "Project, " + req.queryParams.id + ", not found"]; var role = g_lib.getProjectRole(subj, req.queryParams.id); diff --git a/core/database/foxx/api/query_router.js b/core/database/foxx/api/query_router.js index 3a3bedbfa..30de93454 100644 --- a/core/database/foxx/api/query_router.js +++ b/core/database/foxx/api/query_router.js @@ -4,9 +4,12 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); const joi = require("joi"); +const error = require("./lib/error_codes"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); const g_lib = require("./support"); +const logger = require("./lib/logger"); +const basePath = "qry"; module.exports = router; @@ -14,16 +17,24 @@ module.exports = router; router .post("/create", function (req, res) { + let client = undefined; + let result = undefined; try { - var result; - g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "admin"], write: ["q", "owner"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: "Create Query", + }); // Check max number of saved queries if (client.max_sav_qry >= 0) { @@ -38,7 +49,7 @@ router if (count >= client.max_sav_qry) throw [ - g_lib.ERR_ALLOCATION_EXCEEDED, + error.ERR_ALLOCATION_EXCEEDED, "Saved query limit reached (" + client.max_sav_qry + "). 
Contact system administrator to increase limit.", @@ -81,7 +92,27 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Success", + description: "Create Query", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Failure", + description: "Create Query", + extra: result, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -105,20 +136,29 @@ router router .post("/update", function (req, res) { + let client = undefined; + let result = undefined; try { - var result; - g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "admin"], write: ["q", "owner"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: "Update a saved query", + }); + var qry = g_db.q.document(req.body.id); if (client._id != qry.owner && !client.is_admin) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } // Update time and title (if set) @@ -158,9 +198,27 @@ router result = qry; }, }); - res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Success", + description: "Update a saved query", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Failure", + description: "Update a saved query", + extra: 
result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -185,12 +243,22 @@ router router .get("/view", function (req, res) { + let client = undefined; + let qry = undefined; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); - var qry = g_db.q.document(req.queryParams.id); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: "View specified query", + }); + qry = g_db.q.document(req.queryParams.id); if (client._id != qry.owner && !client.is_admin) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } qry.id = qry._id; @@ -204,7 +272,27 @@ router delete qry.lmit; res.send(qry); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: "View specified query", + extra: qry, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: "View specified query", + extra: qry, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -215,14 +303,23 @@ router router .get("/delete", function (req, res) { + let client = undefined; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var owner; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Started", + description: "Delete specified query", + }); for (var i in req.queryParams.ids) { if (!req.queryParams.ids[i].startsWith("q/")) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid query ID '" + 
req.queryParams.ids[i] + "'.", ]; } @@ -232,18 +329,38 @@ router }); if (!owner) { throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Query '" + req.queryParams.ids[i] + "' not found.", ]; } if (client._id != owner._to && !client.is_admin) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } g_graph.q.remove(owner._from); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Success", + description: "Delete specified query", + extra: req.queryParams.ids[i], + }); } } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Failure", + description: "Delete specified query", + extra: req.queryParams.ids[i], + error: e, + }); + g_lib.handleException(e, res); } }) @@ -254,12 +371,21 @@ router router .get("/list", function (req, res) { + let client = undefined; + let result = undefined; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Started", + description: "List client saved queries", + }); var qry = "for v in 1..1 inbound @user owner filter is_same_collection('q',v) sort v.title"; - var result; if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { qry += " limit " + req.queryParams.offset + ", " + req.queryParams.count; @@ -291,7 +417,29 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Success", + description: "List client saved queries", + extra: { + queryParams: req.queryParams, + _countTotal: 
result._countTotal, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Failure", + description: "List client saved queries", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -313,7 +461,7 @@ function execQuery(client, mode, published, orig_query) { if (query.params.owner.startsWith("u/") && query.params.owner != client._id) { // A non-client owner for non-public searches means this is a search over shared data if (!g_db.u.exists(query.params.owner)) - throw [g_lib.ERR_NOT_FOUND, "user " + query.params.owner + " not found"]; + throw [error.ERR_NOT_FOUND, "user " + query.params.owner + " not found"]; ctxt = query.params.owner; @@ -330,7 +478,7 @@ function execQuery(client, mode, published, orig_query) { .toArray(); if (!query.params.cols) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "No access to user '" + query.params.owner + "' data/collections.", ]; } @@ -338,7 +486,7 @@ function execQuery(client, mode, published, orig_query) { } } else if (query.params.owner.startsWith("p/")) { if (!g_db.p.exists(query.params.owner)) - throw [g_lib.ERR_NOT_FOUND, "Project " + query.params.owner + " not found"]; + throw [error.ERR_NOT_FOUND, "Project " + query.params.owner + " not found"]; // Must determine clients access to the project @@ -364,7 +512,7 @@ function execQuery(client, mode, published, orig_query) { .toArray(); if (!query.params.cols) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "No access to project '" + query.params.owner + "'.", ]; } @@ -395,7 +543,7 @@ function execQuery(client, mode, published, orig_query) { })._to != query.params.owner ) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Collection '" + col + "' not in search scope.", ]; } @@ -414,7 +562,7 @@ function execQuery(client, mode, published, orig_query) { // sch_id is id:ver var idx = 
query.params.sch_id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = query.params.sch_id.substr(0, idx), sch_ver = parseInt(query.params.sch_id.substr(idx + 1)); @@ -424,7 +572,7 @@ function execQuery(client, mode, published, orig_query) { ver: sch_ver, }); if (!query.params.sch) - throw [g_lib.ERR_NOT_FOUND, "Schema '" + sch_id + "-" + sch_ver + "' does not exist."]; + throw [error.ERR_NOT_FOUND, "Schema '" + sch_id + "-" + sch_ver + "' does not exist."]; query.params.sch = query.params.sch._id; delete query.params.sch_id; @@ -503,12 +651,23 @@ function execQuery(client, mode, published, orig_query) { router .get("/exec", function (req, res) { + let client = undefined; + let results = undefined; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/exec", + status: "Started", + description: "Execute specified queries", + }); + var qry = g_db.q.document(req.queryParams.id); if (client._id != qry.owner && !client.is_admin) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { @@ -516,10 +675,29 @@ router qry.params.cnt = req.queryParams.count; } - var results = execQuery(client, qry.query.mode, qry.query.published, qry); + results = execQuery(client, qry.query.mode, qry.query.published, qry); res.send(results); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/exec", + status: "Success", + description: "Execute specified queries", + extra: results, + }); } catch (e) { + logger.logRequestFailure({ + 
client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/exec", + status: "Failure", + description: "Execute specified queries", + extra: results, + error: e, + }); g_lib.handleException(e, res); } }) @@ -532,17 +710,46 @@ router router .post("/exec/direct", function (req, res) { + let results = undefined; + let client = undefined; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/exec/direct", + status: "Started", + description: "Execute published data search query", + }); const query = { ...req.body, params: JSON.parse(req.body.params), }; - var results = execQuery(client, req.body.mode, req.body.published, query); + results = execQuery(client, req.body.mode, req.body.published, query); res.send(results); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/exec/direct", + status: "Success", + description: "Execute published data search query", + extra: results, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/exec/direct", + status: "Failure", + description: "Execute published data search query", + extra: results, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/api/record.js b/core/database/foxx/api/record.js index 36c330672..1c3fbbf97 100644 --- a/core/database/foxx/api/record.js +++ b/core/database/foxx/api/record.js @@ -3,6 +3,7 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const { errors } = require("@arangodb"); +const error = require("./lib/error_codes"); /** * Represents a 
record in the database and provides methods to manage it. @@ -50,12 +51,12 @@ class Record { this.#exists = true; } else { this.#exists = false; - this.#error = g_lib.ERR_NOT_FOUND; + this.#error = error.ERR_NOT_FOUND; this.#err_msg = "Invalid key: (" + a_key + "). No record found."; } } catch (e) { this.#exists = false; - this.#error = g_lib.ERR_INTERNAL_FAULT; + this.#error = error.ERR_INTERNAL_FAULT; this.#err_msg = "Unknown error encountered."; console.log(e); } @@ -77,7 +78,7 @@ class Record { } else if (loc.uid.charAt(0) == "p") { return path + "project/" + loc.uid.substr(2) + "/" + this.#key; } else { - this.#error = g_lib.ERR_INTERNAL_FAULT; + this.#error = error.ERR_INTERNAL_FAULT; this.#err_msg = "Provided path does not fit within supported directory "; this.#err_msg += "structure for repository, no user or project folder has"; this.#err_msg += " been determined for the record."; @@ -98,7 +99,7 @@ class Record { return false; } if (storedPath !== inputPath) { - this.#error = g_lib.ERR_PERM_DENIED; + this.#error = error.ERR_PERM_DENIED; this.#err_msg = "Record path is not consistent with repo expected path is: " + storedPath + @@ -159,7 +160,7 @@ class Record { }); if (!this.#loc) { - this.#error = g_lib.ERR_PERM_DENIED; + this.#error = error.ERR_PERM_DENIED; this.#err_msg = "Permission denied data is not managed by DataFed. This can happen if you try to do a transfer directly from Globus."; return false; @@ -202,7 +203,7 @@ class Record { // If no allocation is found for the item throw an error // if the paths do not align also throw an error. 
if (!new_alloc) { - this.#error = g_lib.ERR_PERM_DENIED; + this.#error = error.ERR_PERM_DENIED; this.#err_msg = "Permission denied, '" + this.#key + "' is not part of an allocation '"; return false; @@ -211,7 +212,7 @@ class Record { this.#repo = g_db._document(this.#loc.new_repo); if (!this.#repo) { - this.#error = g_lib.ERR_INTERNAL_FAULT; + this.#error = error.ERR_INTERNAL_FAULT; this.#err_msg = "Unable to find repo that record is meant to be allocated too, '" + this.#loc.new_repo + diff --git a/core/database/foxx/api/repo.js b/core/database/foxx/api/repo.js index baea100b9..226179e0b 100644 --- a/core/database/foxx/api/repo.js +++ b/core/database/foxx/api/repo.js @@ -3,6 +3,7 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const { errors } = require("@arangodb"); +const error = require("./lib/error_codes"); const pathModule = require("./posix_path"); /** @@ -80,12 +81,12 @@ class Repo { this.#exists = true; } else { this.#exists = false; - this.#error = g_lib.ERR_NOT_FOUND; + this.#error = error.ERR_NOT_FOUND; this.#err_msg = "Invalid repo: (" + a_key + "). 
No record found."; } } catch (e) { this.#exists = false; - this.#error = g_lib.ERR_INTERNAL_FAULT; + this.#error = error.ERR_INTERNAL_FAULT; this.#err_msg = "Unknown error encountered."; console.log(e); } @@ -135,12 +136,12 @@ class Repo { pathType(a_path) { // Ensure the repo exists if (!this.#exists) { - throw [g_lib.ERR_PERM_DENIED, "Repo does not exist " + this.#repo_id]; + throw [error.ERR_PERM_DENIED, "Repo does not exist " + this.#repo_id]; } let repo = g_db._document(this.#repo_id); if (!repo.path) { - throw [g_lib.ERR_INTERNAL_FAULT, "Repo document is missing path: " + this.#repo_id]; + throw [error.ERR_INTERNAL_FAULT, "Repo document is missing path: " + this.#repo_id]; } // Get and sanitize the repo root path by removing the trailing slash if one exists diff --git a/core/database/foxx/api/repo_router.js b/core/database/foxx/api/repo_router.js index d0c828e8a..6f79c53fb 100644 --- a/core/database/foxx/api/repo_router.js +++ b/core/database/foxx/api/repo_router.js @@ -3,13 +3,47 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); const joi = require("joi"); - +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); +const { RepositoryType } = require("./models/repositories/types"); +const { Repositories } = require("./models/repositories/repositories"); +const { Result } = require("./lib/result"); const g_db = require("@arangodb").db; const g_lib = require("./support"); const g_tasks = require("./tasks"); module.exports = router; +function validateAndNormalizeRepoPath(obj) { + if (!obj.path || typeof obj.path !== "string") { + throw [error.ERR_INVALID_PARAM, "Repository path must be a valid string."]; + } + + // Must start with a slash + if (!obj.path.startsWith("/")) { + throw [error.ERR_INVALID_PARAM, "Repository path must be an absolute file system path."]; + } + + // Ensure trailing slash + if (!obj.path.endsWith("/")) { + obj.path += "/"; + } + + // Extract last folder name 
before trailing slash + const idx = obj.path.lastIndexOf("/", obj.path.length - 2); + const lastPart = obj.path.substring(idx + 1, obj.path.length - 1); + + // Ensure last part matches repository key + if (lastPart !== obj.key) { + throw [ + error.ERR_INVALID_PARAM, + `Last part of repository path must match repository ID suffix (${obj.key})`, + ]; + } + + return obj.path; // return the normalized path if needed +} + router .get("/list", function (req, res) { var client; @@ -17,7 +51,7 @@ router client = g_lib.getUserFromClientID(req.queryParams.client); if (req.queryParams.all && !client.is_admin) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } @@ -106,65 +140,49 @@ router }, action: function () { var client = g_lib.getUserFromClientID(req.queryParams.client); - if (!client.is_admin) throw g_lib.ERR_PERM_DENIED; + if (!client.is_admin) throw error.ERR_PERM_DENIED; var obj = { + key: req.body.id, capacity: req.body.capacity, pub_key: req.body.pub_key, address: req.body.address, endpoint: req.body.endpoint, path: req.body.path, + type: req.body?.type, }; - - g_lib.procInputParam(req.body, "id", false, obj); g_lib.procInputParam(req.body, "title", false, obj); g_lib.procInputParam(req.body, "summary", false, obj); - g_lib.procInputParam(req.body, "domain", false, obj); - if (!obj.path.startsWith("/")) - throw [ - g_lib.ERR_INVALID_PARAM, - "Repository path must be an absolute path file system path.", - ]; - - if (!obj.path.endsWith("/")) obj.path += "/"; + if (req.body?.type == undefined || req.body?.type == RepositoryType.GLOBUS) { + obj["type"] = RepositoryType.GLOBUS; + g_lib.procInputParam(req.body, "domain", false, obj); + validateAndNormalizeRepoPath(obj); - var idx = obj.path.lastIndexOf("/", obj.path.length - 2); - if (obj.path.substr(idx + 1, obj.path.length - idx - 2) != obj._key) - throw [ - g_lib.ERR_INVALID_PARAM, - "Last part of repository path must be repository ID suffix (" + - obj._key + - ")", - ]; - - if (req.body.exp_path) { - 
obj.exp_path = req.body.exp_path; - if (!obj.exp_path.endsWith("/")) obj.path += "/"; + if (req.body.exp_path) { + obj.exp_path = req.body.exp_path; + if (!obj.exp_path.endsWith("/")) obj.path += "/"; + } } - var repo = g_db.repo.save(obj, { - returnNew: true, - }); + const repo = Repositories.createRepositoryByType(obj).raiseIfError(); + const repo_doc = repo.save().raiseIfError(); - for (var i in req.body.admins) { - if (!g_db._exists(req.body.admins[i])) - throw [ - g_lib.ERR_NOT_FOUND, - "User, " + req.body.admins[i] + ", not found", - ]; + for (const adminId of req.body.admins) { + if (!g_db._exists(adminId)) + throw [error.ERR_NOT_FOUND, "User, " + adminId + ", not found"]; g_db.admin.save({ - _from: repo._id, - _to: req.body.admins[i], + _from: repo.id(), + _to: adminId, }); } - repo.new.id = repo.new._id; - delete repo.new._id; - delete repo.new._key; - delete repo.new._rev; - res.send([repo.new]); + repo_doc.id = repo_doc._id; + delete repo_doc._id; + delete repo_doc._key; + delete repo_doc._rev; + res.send([repo_doc]); }, }); } catch (e) { @@ -180,12 +198,13 @@ router desc: joi.string().optional(), domain: joi.string().optional(), capacity: joi.number().integer().min(0).required(), - pub_key: joi.string().required(), - address: joi.string().required(), - endpoint: joi.string().required(), - path: joi.string().required(), + pub_key: joi.string().optional(), + address: joi.string().optional(), + endpoint: joi.string().optional(), + path: joi.string().optional(), exp_path: joi.string().optional(), admins: joi.array().items(joi.string()).required(), + type: joi.string().valid(RepositoryType.GLOBUS, RepositoryType.METADATA).optional(), }) .required(), "Repo fields", @@ -204,7 +223,7 @@ router }, action: function () { var client = g_lib.getUserFromClientID(req.queryParams.client); - g_lib.ensureAdminPermRepo(client, req.body.id); + permissions.ensureAdminPermRepo(client, req.body.id); var obj = {}; g_lib.procInputParam(req.body, "title", true, obj); @@ -214,7 
+233,7 @@ router if (req.body.path) { if (!req.body.path.startsWith("/")) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Repository path must be an absolute path file system path.", ]; @@ -226,7 +245,7 @@ router var key = req.body.id.substr(5); if (obj.path.substr(idx + 1, obj.path.length - idx - 2) != key) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Last part of repository path must be repository ID suffix (" + key + ")", @@ -257,7 +276,7 @@ router for (var i in req.body.admins) { if (!g_db._exists(req.body.admins[i])) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "User, " + req.body.admins[i] + ", not found", ]; g_db.admin.save({ @@ -324,7 +343,7 @@ router .toArray(); if (items_connected_to_repo.length > 0) { throw [ - g_lib.ERR_IN_USE, + error.ERR_IN_USE, "Cannot delete repo. The repository is in use: " + items_connected_to_repo.join(", "), ]; @@ -333,9 +352,9 @@ router var client = g_lib.getUserFromClientID(req.queryParams.client); if (!g_db._exists(req.queryParams.id)) - throw [g_lib.ERR_NOT_FOUND, "Repo, " + req.queryParams.id + ", not found"]; + throw [error.ERR_NOT_FOUND, "Repo, " + req.queryParams.id + ", not found"]; - g_lib.ensureAdminPermRepo(client, req.queryParams.id); + permissions.ensureAdminPermRepo(client, req.queryParams.id); const graph = require("@arangodb/general-graph")._graph("sdmsg"); // Make sure there are no allocations present on repo @@ -345,7 +364,7 @@ router console.log(alloc); if (alloc.hasNext()) throw [ - g_lib.ERR_IN_USE, + error.ERR_IN_USE, "Cannot delete repo with associated allocations. 
Allocations still exist on the repository.", ]; // Remove the repo vertex from the graph and all edges, this includes all @@ -438,7 +457,7 @@ function calcSize(a_item, a_recurse, a_depth, a_visited, a_result) { calcSize(items.next(), a_recurse, a_depth + 1, a_visited, a_result); } } - } else throw [g_lib.ERR_INVALID_PARAM, "Invalid item type for size calculation: " + a_item]; + } else throw [error.ERR_INVALID_PARAM, "Invalid item type for size calculation: " + a_item]; } router @@ -446,7 +465,7 @@ router var client = g_lib.getUserFromClientID(req.queryParams.client); var repo = g_db.repo.document(req.queryParams.repo); - g_lib.ensureAdminPermRepo(client, repo._id); + permissions.ensureAdminPermRepo(client, repo._id); var result = g_db ._query( @@ -546,7 +565,7 @@ router owner_id != client._id && g_lib.getProjectRole(client._id, owner_id) == g_lib.PROJ_NO_ROLE ) { - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; } } else { owner_id = client._id; @@ -593,7 +612,7 @@ function getAllocStats(a_repo, a_subject) { }); if (!alloc) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Subject " + a_subject + " has no allocation on repo " + a_repo, ]; @@ -642,7 +661,7 @@ router .get("/alloc/stats", function (req, res) { try { var client = g_lib.getUserFromClientID(req.queryParams.client); - g_lib.ensureAdminPermRepo(client, req.queryParams.repo); + permissions.ensureAdminPermRepo(client, req.queryParams.repo); var result = getAllocStats(req.queryParams.repo, req.queryParams.subject); res.send(result); } catch (e) { @@ -760,16 +779,16 @@ router if (!g_db._exists(req.queryParams.repo)) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Repo, '" + req.queryParams.repo + "', does not exist", ]; if (!g_db._exists(subject_id)) - throw [g_lib.ERR_NOT_FOUND, "Subject, " + subject_id + ", not found"]; + throw [error.ERR_NOT_FOUND, "Subject, " + subject_id + ", not found"]; var repo = g_db.repo.document(req.queryParams.repo); - 
g_lib.ensureAdminPermRepo(client, repo._id); + permissions.ensureAdminPermRepo(client, repo._id); var alloc = g_db.alloc.firstExample({ _from: subject_id, @@ -777,7 +796,7 @@ router }); if (!alloc) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Subject, '" + subject_id + "', has no allocation on " + repo._id, ]; @@ -823,14 +842,14 @@ router if (req.queryParams.subject.startsWith("p/")) { if (!g_db._exists(subject_id)) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Project, " + req.queryParams.subject + ", not found", ]; var role = g_lib.getProjectRole(client._id, req.queryParams.subject); if (role != g_lib.PROJ_MANAGER && role != g_lib.PROJ_ADMIN) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Setting default allocation on project requires admin/manager rights.", ]; @@ -840,7 +859,7 @@ router if (subject_id != client._id && !client.is_admin) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Setting default allocation on user requires admin rights.", ]; } @@ -848,7 +867,7 @@ router if (!g_db._exists(req.queryParams.repo)) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Repo, '" + req.queryParams.repo + "', does not exist", ]; diff --git a/core/database/foxx/api/schema_router.js b/core/database/foxx/api/schema_router.js index 62d058fc7..51734b678 100644 --- a/core/database/foxx/api/schema_router.js +++ b/core/database/foxx/api/schema_router.js @@ -4,6 +4,7 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); const joi = require("joi"); +const error = require("./lib/error_codes"); const g_db = require("@arangodb").db; const g_lib = require("./support"); const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); @@ -101,11 +102,11 @@ router if (req.body.sys) { if (!client.is_admin) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Creating a system schema requires admin privileges.", ]; if (!req.body.pub) - throw [g_lib.ERR_INVALID_PARAM, "System schemas cannot be 
private."]; + throw [error.ERR_INVALID_PARAM, "System schemas cannot be private."]; } else { obj.own_id = client._id; obj.own_nm = client.name; @@ -161,7 +162,7 @@ router const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), @@ -172,7 +173,7 @@ router if (!sch_old) { throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found.", ]; } @@ -180,7 +181,7 @@ router // Cannot modify schemas that are in use if (sch_old.cnt) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Schema is associated with data records - cannot update.", ]; } @@ -192,13 +193,13 @@ router }) ) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Schema is referenced by another schema - cannot update.", ]; } if (sch_old.own_id != client._id && !client.is_admin) - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; var obj = {}; @@ -209,12 +210,12 @@ router if (req.body.sys) { if (!client.is_admin) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Changing to a system schema requires admin privileges.", ]; if (!sch_old.pub && !req.body.pub) - throw [g_lib.ERR_INVALID_PARAM, "System schemas cannot be private."]; + throw [error.ERR_INVALID_PARAM, "System schemas cannot be private."]; obj.own_id = null; obj.own_nm = null; @@ -230,7 +231,7 @@ router })) ) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Cannot change schema ID once revisions exist.", ]; } @@ -292,7 +293,7 @@ router const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw 
[error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), @@ -303,11 +304,11 @@ router if (!sch) throw [ - g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found.", ]; - if (sch.own_id != client._id && !client.is_admin) throw g_lib.ERR_PERM_DENIED; + if (sch.own_id != client._id && !client.is_admin) throw error.ERR_PERM_DENIED; if ( g_db.sch_ver.firstExample({ @@ -315,13 +316,13 @@ router }) ) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "A revision of schema '" + req.queryParams.id + "' already exists.", ]; if (!sch.own_id && !client.is_admin) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Revising a system schema requires admin privileges.", ]; @@ -340,7 +341,7 @@ router if (req.body.sys) { if (!client.is_admin) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Creating a system schema requires admin privileges.", ]; @@ -349,7 +350,7 @@ router } if (!sch.pub && !sch.own_id) - throw [g_lib.ERR_INVALID_PARAM, "System schemas cannot be private."]; + throw [error.ERR_INVALID_PARAM, "System schemas cannot be private."]; g_lib.procInputParam(req.body, "desc", true, sch); @@ -409,7 +410,7 @@ router const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), @@ -419,14 +420,14 @@ router }); if (!sch_old) - throw [g_lib.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; + throw [error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; - if (sch_old.own_id != client._id && !client.is_admin) throw g_lib.ERR_PERM_DENIED; + if 
(sch_old.own_id != client._id && !client.is_admin) throw error.ERR_PERM_DENIED; // Cannot delete schemas that are in use if (sch_old.cnt) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Schema is associated with data records - cannot update.", ]; } @@ -438,7 +439,7 @@ router }) ) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Schema is referenced by another schema - cannot update.", ]; } @@ -452,7 +453,7 @@ router _to: sch_old._id, }) ) { - throw [g_lib.ERR_PERM_DENIED, "Cannot delete intermediate schema revisions."]; + throw [error.ERR_PERM_DENIED, "Cannot delete intermediate schema revisions."]; } g_graph.sch.remove(sch_old._id); @@ -471,7 +472,7 @@ router const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; + throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), @@ -480,10 +481,10 @@ router ver: sch_ver, }); - if (!sch) throw [g_lib.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; + if (!sch) throw [error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; if (!(sch.pub || sch.own_id == client._id || client.is_admin)) - throw g_lib.ERR_PERM_DENIED; + throw error.ERR_PERM_DENIED; if (req.queryParams.resolve) { var refs = {}; @@ -660,7 +661,7 @@ function validateKey(val) { } if (i == len) { - throw [g_lib.ERR_INVALID_CHAR, "Malformed property '" + val + "'."]; + throw [error.ERR_INVALID_CHAR, "Malformed property '" + val + "'."]; } code = val.charCodeAt(i); @@ -673,7 +674,7 @@ function validateKey(val) { // upper alpha (A-Z) i++; } else { - throw [g_lib.ERR_INVALID_CHAR, "Malformed property '" + val + "'."]; + throw [error.ERR_INVALID_CHAR, "Malformed property '" + val + "'."]; } // Check remaining chars @@ -687,7 +688,7 @@ function 
validateKey(val) { code != 95 ) { // _ - throw [g_lib.ERR_INVALID_CHAR, "Illegal character(s) in property '" + val + "'."]; + throw [error.ERR_INVALID_CHAR, "Illegal character(s) in property '" + val + "'."]; } } } @@ -734,7 +735,7 @@ function updateSchemaRefs(a_sch) { if (idx < 0) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid reference ID '" + v + "' in schema (expected id:ver).", ]; @@ -748,9 +749,9 @@ function updateSchemaRefs(a_sch) { ver: ver, }); - if (!r) throw [g_lib.ERR_INVALID_PARAM, "Referenced schema '" + v + "' does not exist."]; + if (!r) throw [error.ERR_INVALID_PARAM, "Referenced schema '" + v + "' does not exist."]; - if (r._id == a_sch._id) throw [g_lib.ERR_INVALID_PARAM, "Schema references self."]; + if (r._id == a_sch._id) throw [error.ERR_INVALID_PARAM, "Schema references self."]; g_graph.sch_dep.save({ _from: a_sch._id, @@ -769,7 +770,7 @@ function gatherRefs(a_doc, a_refs) { gatherRefs(v, a_refs); } else if (k == "$ref") { if (typeof v !== "string") - throw [g_lib.ERR_INVALID_PARAM, "Invalid reference type in schema."]; + throw [error.ERR_INVALID_PARAM, "Invalid reference type in schema."]; // Add dependencies to external schemas, only once i = v.indexOf("#"); diff --git a/core/database/foxx/api/schemas/repo.js b/core/database/foxx/api/schemas/repo.js new file mode 100644 index 000000000..71ae2c387 --- /dev/null +++ b/core/database/foxx/api/schemas/repo.js @@ -0,0 +1,24 @@ +"use strict"; + +const Joi = require("joi"); + +const repositoryGlobusSchema = Joi.object({ + repo_id: Joi.string().required(), + type: Joi.string().valid(RepositoryType.GLOBUS).required(), + title: Joi.string().min(1).required(), + desc: Joi.string().required().allow(""), + capacity: Joi.number().required(), + endpoint: Joi.string().uri().required().allow(""), + path: Joi.string().required().allow(""), + pub_key: Joi.string().required().allow(""), + address: Joi.string().required(), + exp_path: Joi.string().required().allow(""), +}); + +const 
repositoryMetadataSchema = Joi.object({ + repo_id: Joi.string().required(), + type: Joi.string().valid(RepositoryType.METADATA).required(), + title: Joi.string().min(1).required(), + desc: Joi.string().required().allow(""), + capacity: Joi.number().required(), +}); diff --git a/core/database/foxx/api/schemas/response_envelope.js b/core/database/foxx/api/schemas/response_envelope.js new file mode 100644 index 000000000..4bc939618 --- /dev/null +++ b/core/database/foxx/api/schemas/response_envelope.js @@ -0,0 +1,31 @@ +"use strict"; + +const { TaskSchema } = require("./task"); + +const Joi = require("joi"); + +const SuccessResponseDirectSchema = Joi.object({ + execution_method: Joi.string().valid("deferred", "direct").required(), + result: Joi.any(), +}).required(); + +// +// This function allows keeping the envelope while overwriting the +// result with an arbitrary schema +// +function makeResponseDirectSchema(resultSchema) { + return SuccessResponseDirectSchema.keys({ + result: resultSchema, + }); +} + +const SuccessResponseDeferredSchema = Joi.object({ + execution_method: Joi.string().valid("deferred", "direct").required(), + task: TaskSchema, +}).required(); + +module.exports = { + SuccessResponseDeferredSchema, + SuccessResponseDirectSchema, + makeResponseDirectSchema, +}; diff --git a/core/database/foxx/api/schemas/task.js b/core/database/foxx/api/schemas/task.js new file mode 100644 index 000000000..7ed491280 --- /dev/null +++ b/core/database/foxx/api/schemas/task.js @@ -0,0 +1,27 @@ +"use strict"; + +const TaskStateSchema = Joi.object({ + repo_id: Joi.string().required(), + subject: Joi.string().required(), + data_limit: Joi.number().integer().required(), + rec_limit: Joi.number().integer().required(), + repo_path: Joi.string().required(), +}).required(); + +const TaskSchema = Joi.object({ + task_id: Joi.string().required(), + type: Joi.number().integer().required(), + status: Joi.number().integer().required(), + msg: Joi.string().required(), + ct: 
Joi.number().integer().required(), + ut: Joi.number().integer().required(), + client: Joi.string().required(), + step: Joi.number().integer().required(), + steps: Joi.number().integer().required(), + state: TaskStateSchema, +}).required(); + +module.exports = { + TaskSchema, + TaskStateSchema, +}; diff --git a/core/database/foxx/api/support.js b/core/database/foxx/api/support.js index 6133ed7c7..8f7ec612e 100644 --- a/core/database/foxx/api/support.js +++ b/core/database/foxx/api/support.js @@ -1,6 +1,8 @@ "use strict"; const joi = require("joi"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); module.exports = (function () { var obj = {}; @@ -8,30 +10,6 @@ module.exports = (function () { obj.db = require("@arangodb").db; obj.graph = require("@arangodb/general-graph")._graph("sdmsg"); - obj.PERM_RD_REC = 0x0001; // Read record info (description, keywords, details) - obj.PERM_RD_META = 0x0002; // Read structured metadata - obj.PERM_RD_DATA = 0x0004; // Read raw data - obj.PERM_WR_REC = 0x0008; // Write record info (description, keywords, details) - obj.PERM_WR_META = 0x0010; // Write structured metadata - obj.PERM_WR_DATA = 0x0020; // Write raw data - obj.PERM_LIST = 0x0040; // Find record and view ID, alias, title, and owner - obj.PERM_LINK = 0x0080; // Link/unlink child records (collections only) - obj.PERM_CREATE = 0x0100; // Create new child records (collections only) - obj.PERM_DELETE = 0x0200; // Delete record - obj.PERM_SHARE = 0x0400; // View/set ACLs - obj.PERM_LOCK = 0x0800; // Lock record - obj.PERM_LABEL = 0x1000; // Label record - obj.PERM_TAG = 0x2000; // Tag record - obj.PERM_ANNOTATE = 0x4000; // Annotate record - - obj.PERM_NONE = 0x0000; - obj.PERM_RD_ALL = 0x0007; // Read all - obj.PERM_WR_ALL = 0x0038; // Write all - obj.PERM_ALL = 0x7fff; - obj.PERM_MEMBER = 0x0047; // Project record perms - obj.PERM_MANAGER = 0x0407; // Project record perms - obj.PERM_PUBLIC = 0x0047; - obj.MAX_COLL_ITEMS = 10000; 
obj.MAX_QRY_ITEMS = 10000; obj.MAX_PAGE_SIZE = 1000; @@ -131,40 +109,6 @@ module.exports = (function () { inhgrant: joi.number().optional(), }); - obj.ERR_INFO = []; - obj.ERR_COUNT = 0; - - obj.ERR_AUTHN_FAILED = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Authentication Failed"]); - obj.ERR_PERM_DENIED = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Permission Denied"]); - obj.ERR_INVALID_PARAM = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Invalid Parameter"]); - obj.ERR_INPUT_TOO_LONG = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Input value too long"]); - obj.ERR_INVALID_CHAR = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Invalid character"]); - obj.ERR_NOT_FOUND = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Record Not Found"]); - obj.ERR_IN_USE = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Value In Use"]); - obj.ERR_LINK = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Collection Link Error"]); - obj.ERR_UNLINK = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Collection Unlink Error"]); - obj.ERR_MISSING_REQ_PARAM = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Missing one or more required parameters"]); - obj.ERR_NO_RAW_DATA = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Record has no raw data"]); - obj.ERR_XFR_CONFLICT = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Data transfer conflict"]); - obj.ERR_INTERNAL_FAULT = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Internal server fault"]); - obj.ERR_NO_ALLOCATION = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "No allocation available"]); - obj.ERR_ALLOCATION_EXCEEDED = obj.ERR_COUNT++; - obj.ERR_INFO.push([400, "Storage allocation exceeded"]); - obj.CHARSET_ID = 0; obj.CHARSET_ALIAS = 1; obj.CHARSET_TOPIC = 2; @@ -320,7 +264,7 @@ module.exports = (function () { if (!spec) { throw [ - obj.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Input specification for '" + a_field + "' not found. 
Please contact system administrator.", @@ -345,7 +289,7 @@ module.exports = (function () { // Check length if specified if (spec.max_len && val.length > spec.max_len) throw [ - obj.ERR_INPUT_TOO_LONG, + error.ERR_INPUT_TOO_LONG, "'" + spec.label + "' field is too long. Maximum length is " + @@ -369,8 +313,13 @@ module.exports = (function () { // lower alpha (a-z) if (extra.indexOf(val.charAt(i)) == -1) throw [ - obj.ERR_INVALID_CHAR, - "Invalid character(s) in '" + spec.label + "' field.", + error.ERR_INVALID_CHAR, + "Invalid character(s) in '" + + spec.label + + "' field '" + + val + + "' detected char '" + + val.charAt(i), ]; } } @@ -385,7 +334,7 @@ module.exports = (function () { if (val === "") { if (spec.required) throw [ - obj.ERR_MISSING_REQ_PARAM, + error.ERR_MISSING_REQ_PARAM, "Required field '" + spec.label + "' cannot be deleted.", ]; @@ -393,7 +342,7 @@ module.exports = (function () { else a_out[a_field] = null; } } else if (spec.required) - throw [obj.ERR_MISSING_REQ_PARAM, "Missing required field '" + spec.label + "'."]; + throw [error.ERR_MISSING_REQ_PARAM, "Missing required field '" + spec.label + "'."]; } }; @@ -404,7 +353,7 @@ module.exports = (function () { obj.validatePassword = function (pw) { if (pw.length < obj.PASSWORD_MIN_LEN) { throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "ERROR: password must be at least " + obj.PASSWORD_MIN_LEN + " characters in length.", @@ -428,7 +377,7 @@ module.exports = (function () { if (j != 3) { throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "ERROR: password must contain at least one number (0-9) and one special character (" + obj.pw_chars + ").", @@ -454,10 +403,10 @@ module.exports = (function () { obj.handleException = function (e, res) { console.log("Service exception:", e); - if (obj.isInteger(e) && e >= 0 && e < obj.ERR_COUNT) { - res.throw(obj.ERR_INFO[e][0], obj.ERR_INFO[e][1]); + if (obj.isInteger(e) && e >= 0 && e < error.ERR_COUNT) { + res.throw(error.ERR_INFO[e][0], 
error.ERR_INFO[e][1]); } else if (Array.isArray(e)) { - res.throw(obj.ERR_INFO[e[0]][0], e[1]); + res.throw(error.ERR_INFO[e[0]][0], e[1]); //} else if ( e.hasOwnProperty( "errorNum" )) { } else if (Object.prototype.hasOwnProperty.call(e, "errorNum")) { switch (e.errorNum) { @@ -588,7 +537,7 @@ module.exports = (function () { .toArray(); if (result.length !== 1) { - throw [obj.ERR_NOT_FOUND, "No user matching Globus IDs found"]; + throw [error.ERR_NOT_FOUND, "No user matching Globus IDs found"]; } var first_uuid = result[0]._id; @@ -596,7 +545,7 @@ module.exports = (function () { for (var i = 1; i < result.length; i++) { if (first_uuid != result[i]._id) { throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "uuid_list does not resolve to a single user, unable to unambiguously resolve user, it is possible that you have multiple accounts when you should have only a single one problematic ids are: " + first_uuid + " and " + @@ -623,7 +572,7 @@ module.exports = (function () { }) .toArray(); if (result.length != 1) { - throw [obj.ERR_NOT_FOUND, "No user matching Globus IDs found"]; + throw [error.ERR_NOT_FOUND, "No user matching Globus IDs found"]; } var first_uuid = result[0]._id; @@ -679,7 +628,7 @@ module.exports = (function () { if (a_client_id.startsWith("u/")) { if (!obj.db.u.exists(a_client_id)) { - throw [obj.ERR_INVALID_PARAM, "No such user '" + a_client_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such user '" + a_client_id + "'"]; } return obj.db._document({ @@ -709,7 +658,7 @@ module.exports = (function () { }); } else { if (!obj.db.u.exists("u/" + a_client_id)) { - throw [obj.ERR_INVALID_PARAM, "No such user 'u/" + a_client_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such user 'u/" + a_client_id + "'"]; } return obj.db._document({ _id: "u/" + a_client_id, @@ -724,7 +673,7 @@ module.exports = (function () { if (result.length != 1) { //console.log("Client", a_client_id, "not found, params:", params ); - throw [obj.ERR_NOT_FOUND, 
"Account/Identity '" + a_client_id + "' not found"]; + throw [error.ERR_NOT_FOUND, "Account/Identity '" + a_client_id + "' not found"]; } return result[0]; @@ -795,10 +744,10 @@ module.exports = (function () { .toArray(); if (result.length === 0) { - throw [obj.ERR_NOT_FOUND, "No user matching Globus IDs found"]; + throw [error.ERR_NOT_FOUND, "No user matching Globus IDs found"]; } else if (result.length > 1) { throw [ - obj.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Multiple DataFed accounts associated with the provided Globus identities" + result.toString(), ]; @@ -815,7 +764,7 @@ module.exports = (function () { }) .toArray(); if (result.length !== 1) - throw [obj.ERR_NOT_FOUND, "No user matching authentication key found"]; + throw [error.ERR_NOT_FOUND, "No user matching authentication key found"]; return result[0]; }; @@ -832,7 +781,7 @@ module.exports = (function () { //console.log( "key res:", result ); if (result.length != 1) - throw [obj.ERR_NOT_FOUND, "No user matching authentication key found"]; + throw [error.ERR_NOT_FOUND, "No user matching authentication key found"]; return result[0]; }; @@ -922,17 +871,17 @@ module.exports = (function () { _from: a_user_id, _to: a_repo_id, }); - if (!alloc) throw [obj.ERR_NO_ALLOCATION, "No allocation on repo " + a_repo_id]; + if (!alloc) throw [error.ERR_NO_ALLOCATION, "No allocation on repo " + a_repo_id]; if (alloc.data_size >= alloc.data_limit) throw [ - obj.ERR_ALLOCATION_EXCEEDED, + error.ERR_ALLOCATION_EXCEEDED, "Allocation data size exceeded (max: " + alloc.data_limit + ")", ]; if (alloc.rec_count >= alloc.rec_limit) throw [ - obj.ERR_ALLOCATION_EXCEEDED, + error.ERR_ALLOCATION_EXCEEDED, "Allocation record count exceeded (max: " + alloc.rec_limit + ")", ]; @@ -968,7 +917,7 @@ module.exports = (function () { var id = obj.resolveID(a_obj_id, a_client); if (!obj.db._exists(id)) - throw [obj.ERR_INVALID_PARAM, "Record '" + id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Record '" + id + "' does not 
exist."]; var doc = obj.db._document(id); @@ -983,178 +932,6 @@ module.exports = (function () { .count(); }; - obj.hasAdminPermUser = function (a_client, a_user_id) { - //if ( a_client._id != a_user_id && !a_client.is_admin && !obj.db.owner.firstExample({ _from: a_user_id, _to: a_client._id }) && !obj.db.admin.firstExample({ _from: a_user_id, _to: a_client._id })){ - if (a_client._id != a_user_id && !a_client.is_admin) { - return false; - } else { - return true; - } - }; - - obj.hasAdminPermProj = function (a_client, a_proj_id) { - if ( - !a_client.is_admin && - !obj.db.owner.firstExample({ - _from: a_proj_id, - _to: a_client._id, - }) - ) { - return false; - } else { - return true; - } - }; - - obj.hasManagerPermProj = function (a_client, a_proj_id) { - if ( - !a_client.is_admin && - !obj.db.owner.firstExample({ - _from: a_proj_id, - _to: a_client._id, - }) && - !obj.db.admin.firstExample({ - _from: a_proj_id, - _to: a_client._id, - }) - ) { - return false; - } else { - return true; - } - }; - - obj.hasAdminPermObjectLoaded = function (a_client, a_object) { - // TODO Should collection creator have admin rights? - if (a_object.owner == a_client._id || a_object.creator == a_client._id || a_client.is_admin) - return true; - - if (a_object.owner.charAt(0) == "p") { - if ( - obj.db.owner.firstExample({ - _from: a_object.owner, - _to: a_client._id, - }) - ) - return true; - - if ( - obj.db.admin.firstExample({ - _from: a_object.owner, - _to: a_client._id, - }) - ) - return true; - } - - return false; - }; - - /** - * checks to make sure the client has admin permissions on an object - * - * @param {object} a_client - this is a user document i.e. 
- * - * - * "_key" : "bob", - * "_id" : "u/bob", - * "name" : "bob junior ", - * "name_first" : "bob", - * "name_last" : "jones", - * "is_admin" : true, - * "max_coll" : 50, - * "max_proj" : 10, - * "max_sav_qry" : 20, - * : - * "email" : "bobjones@gmail.com" - * - * @param {string} a_object_id - the identity of a record or collection or project - * - * "d/fdakjfla" - * "p/big_thing" - * "c/my_collection" - * - * @returns {boolean} - if client has admin rights on the object. - **/ - obj.hasAdminPermObject = function (a_client, a_object_id) { - if (a_client.is_admin) return true; - - var first_owner = obj.db.owner.firstExample({ - _from: a_object_id, - }); - if (first_owner !== null) { - var owner_id = first_owner._to; // obj.db.owner.firstExample({ _from: a_object_id })._to; - } else { - throw [obj.ERR_NOT_FOUND, "Data record for owner not found " + a_object_id + "."]; - } - if (owner_id == a_client._id) return true; - - if (owner_id[0] == "p") { - // Object owned by a project - if ( - obj.db.admin.firstExample({ - _from: owner_id, - _to: a_client._id, - }) - ) - return true; - - if ( - obj.db.owner.firstExample({ - _from: owner_id, - _to: a_client._id, - }) - ) - return true; - } - - if (a_object_id[0] == "d") { - var data = obj.db._query("for i in d filter i._id == @id return i.creator", { - id: a_object_id, - }); - if (!data.hasNext()) { - throw [obj.ERR_NOT_FOUND, "Data record " + a_object_id + " not found."]; - } - data = data.next(); - if (a_client._id == data) return true; - } - return false; - }; - - obj.hasAdminPermRepo = function (a_client, a_repo_id) { - if ( - !a_client.is_admin && - !obj.db.admin.firstExample({ - _from: a_repo_id, - _to: a_client._id, - }) - ) { - return false; - } else { - return true; - } - }; - - obj.ensureAdminPermUser = function (a_client, a_user_id) { - if (!obj.hasAdminPermUser(a_client, a_user_id)) throw obj.ERR_PERM_DENIED; - }; - - obj.ensureAdminPermProj = function (a_client, a_user_id) { - if 
(!obj.hasAdminPermProj(a_client, a_user_id)) throw obj.ERR_PERM_DENIED; - }; - - obj.ensureManagerPermProj = function (a_client, a_user_id) { - if (!obj.hasManagerPermProj(a_client, a_user_id)) throw obj.ERR_PERM_DENIED; - }; - - obj.ensureAdminPermObject = function (a_client, a_object_id) { - if (!obj.hasAdminPermObject(a_client, a_object_id)) throw obj.ERR_PERM_DENIED; - }; - - obj.ensureAdminPermRepo = function (a_client, a_repo_id) { - if (!obj.hasAdminPermRepo(a_client, a_repo_id)) throw obj.ERR_PERM_DENIED; - }; - obj.isSrcParentOfDest = function (a_src_id, a_dest_id) { var parent; var child_id = a_dest_id; @@ -1175,7 +952,7 @@ module.exports = (function () { if (i != -1) { if (!a_id.startsWith("d/") && !a_id.startsWith("c/") && !a_id.startsWith("p/")) - throw [obj.ERR_INVALID_PARAM, "Invalid ID '" + a_id + "'"]; + throw [error.ERR_INVALID_PARAM, "Invalid ID '" + a_id + "'"]; id = a_id; } else { var alias_id = "a/"; @@ -1185,13 +962,13 @@ module.exports = (function () { var alias = obj.db.alias.firstExample({ _to: alias_id, }); - if (!alias) throw [obj.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; + if (!alias) throw [error.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; id = alias._from; } if (!obj.db._exists(id)) { - throw [obj.ERR_INVALID_PARAM, "Record '" + id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Record '" + id + "' does not exist."]; } return id; @@ -1204,7 +981,7 @@ module.exports = (function () { if (i != -1) { if (!a_id.startsWith("d/")) - throw [obj.ERR_INVALID_PARAM, "Invalid data record ID '" + a_id + "'"]; + throw [error.ERR_INVALID_PARAM, "Invalid data record ID '" + a_id + "'"]; id = a_id; } else { var alias_id = "a/"; @@ -1214,19 +991,19 @@ module.exports = (function () { alias = obj.db.alias.firstExample({ _to: alias_id, }); - if (!alias) throw [obj.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; + if (!alias) throw [error.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; id = alias._from; if 
(!id.startsWith("d/")) throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Alias '" + a_id + "' does not identify a data record", ]; } if (!obj.db.d.exists(id)) { - throw [obj.ERR_INVALID_PARAM, "Data record '" + id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Data record '" + id + "' does not exist."]; } return id; @@ -1238,7 +1015,7 @@ module.exports = (function () { if (i != -1) { if (!a_id.startsWith("c/")) - throw [obj.ERR_INVALID_PARAM, "Invalid collection ID '" + a_id + "'"]; + throw [error.ERR_INVALID_PARAM, "Invalid collection ID '" + a_id + "'"]; id = a_id; } else { var alias_id = "a/"; @@ -1248,19 +1025,19 @@ module.exports = (function () { var alias = obj.db.alias.firstExample({ _to: alias_id, }); - if (!alias) throw [obj.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; + if (!alias) throw [error.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; id = alias._from; if (!id.startsWith("c/")) throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Alias '" + a_id + "' does not identify a collection", ]; } if (!obj.db.c.exists(id)) { - throw [obj.ERR_INVALID_PARAM, "Collection '" + id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Collection '" + id + "' does not exist."]; } return id; @@ -1272,7 +1049,7 @@ module.exports = (function () { if (i != -1) { if (!a_id.startsWith("c/")) - throw [obj.ERR_INVALID_PARAM, "Invalid collection ID '" + a_id + "'"]; + throw [error.ERR_INVALID_PARAM, "Invalid collection ID '" + a_id + "'"]; id = a_id; } else { var alias_id = "a/"; @@ -1283,19 +1060,19 @@ module.exports = (function () { var alias = obj.db.alias.firstExample({ _to: alias_id, }); - if (!alias) throw [obj.ERR_NOT_FOUND, "Alias '" + alias_id + "' does not exist"]; + if (!alias) throw [error.ERR_NOT_FOUND, "Alias '" + alias_id + "' does not exist"]; id = alias._from; if (!id.startsWith("c/")) throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Alias '" + alias_id + "' does not identify a collection", ]; 
} if (!obj.db.c.exists(id)) { - throw [obj.ERR_INVALID_PARAM, "Collection '" + id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Collection '" + id + "' does not exist."]; } return id; @@ -1307,7 +1084,7 @@ module.exports = (function () { if (i != -1) { if (!a_id.startsWith("d/") && !a_id.startsWith("c/")) - throw [obj.ERR_INVALID_PARAM, "Invalid ID '" + a_id + "'"]; + throw [error.ERR_INVALID_PARAM, "Invalid ID '" + a_id + "'"]; id = a_id; } else { var alias_id = "a/"; @@ -1317,14 +1094,14 @@ module.exports = (function () { var alias = obj.db.alias.firstExample({ _to: alias_id, }); - if (!alias) throw [obj.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; + if (!alias) throw [error.ERR_NOT_FOUND, "Alias '" + a_id + "' does not exist"]; id = alias._from; } if (!obj.db._exists(id)) { throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, (id.charAt(0) == "d" ? "Data record '" : "Collection '") + id + "' does not exist.", ]; } @@ -1579,7 +1356,7 @@ module.exports = (function () { // Detect misplaced topic delimiters for (i in topics) { - if (topics[i].length === 0) throw [obj.ERR_INVALID_PARAM, "Invalid category"]; + if (topics[i].length === 0) throw [error.ERR_INVALID_PARAM, "Invalid category"]; } var topic, parent; //,tag; @@ -1926,470 +1703,6 @@ module.exports = (function () { } }; - /* Test if client has requested permission(s) for specified object. Note: this call does NOT check for - * ownership or admin privilege - the hasAdminPermObject function performs these checks and should be - * called first if needed. This function is typically used when filtering a list of objects that are - * known not to be owned by the client (and that the client is not an admin). In this case, those checks - * would add performance cost for no benefit. 
- */ - obj.hasPermissions = function ( - a_client, - a_object, - a_req_perm, - a_inherited = false, - any = false, - ) { - //console.log("check perm:", a_req_perm, "client:", a_client._id, "object:", a_object._id, "any:", any ); - //console.log("grant:", a_object.grant ); - - var perm_found = 0, - acl, - acls, - result, - i; - - // If object is marked "public", everyone is granted VIEW, and READ permissions - // The current implementation allows users to be denied access to public data (maybe wrong?) - - if (a_object.topic) { - perm_found = obj.PERM_PUBLIC; - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - - // Evaluate user permissions set directly on object - if (a_object.acls & 1) { - acls = obj.db - ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { - object: a_object._id, - client: a_client._id, - }) - .toArray(); - - if (acls.length) { - for (i in acls) { - acl = acls[i]; - //console.log("user_perm:",acl); - perm_found |= acl.grant; - if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; - } - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - } - - // Evaluate group permissions on object - if (a_object.acls & 2) { - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, outbound member filter p.vertices[2]._id == @client return p.edges[0]", - { - object: a_object._id, - client: a_client._id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - //console.log("group_perm:",acl); - perm_found |= acl.grant; - if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; - } - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - } - - // If not all requested permissions have been found, evaluate permissions inherited from parent collections - // Note that items can only be linked to containers that share the same owner - // 
This evaluation is implemented as a manually guided breadth-first search - - var children = [a_object]; - var parents, parent; - - for (;;) { - // Find all parent collections owned by object owner - - parents = obj.db - ._query( - "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", - { - children: children, - }, - ) - .toArray(); - - if (parents.length == 0) break; - - for (i in parents) { - parent = parents[i]; - - if (parent.topic) { - perm_found |= obj.PERM_PUBLIC; - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - - // User ACL first - if (parent.acls && (parent.acls & 1) !== 0) { - acls = obj.db - ._query( - "for v, e in 1..1 outbound @object acl filter v._id == @client return e", - { - object: parent._id, - client: a_client._id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm_found |= acl.inhgrant; - } - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - } - - // Group ACL next - if (parent.acls && (parent.acls & 2) !== 0) { - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", - { - object: parent._id, - client: a_client._id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm_found |= acl.inhgrant; - } - - result = obj.evalPermissions(a_req_perm, perm_found, any); - if (result != null) return result; - } - } - } - - // If there are still missing require permissions... 
- // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) - children = parents; - } - - //console.log("perm (last): false" ); - return false; - }; - - obj.evalPermissions = function (a_req_perm, a_perm_found, any) { - if (any) { - // If any requested permission have been found, return true (granted) - if (a_perm_found & a_req_perm) return true; - else return null; // Else, keep looking - } else { - // If not all requested permissions have been found return NULL (keep looking) - if ((a_perm_found & a_req_perm) != a_req_perm) return null; - else return true; // Else, permission granted - } - }; - - obj.getPermissions = function (a_client, a_object, a_req_perm, a_inherited = false) { - //console.log("get perm:", a_req_perm, "client:", a_client._id, "object:", a_object._id, "any:", any ); - //console.log("grant:", a_object.grant ); - - var perm_found = 0, - acl, - acls, - i; - - // If object has a topic (collections only), everyone is granted VIEW, and READ permissions - // The current implementation allows users to be denied access to public data (maybe wrong?) 
- - if (a_object.topic) { - perm_found = obj.PERM_PUBLIC; - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - - // Evaluate permissions set directly on object - - if (a_object.acls && (a_object.acls & 1) !== 0) { - acls = obj.db - ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { - object: a_object._id, - client: a_client._id, - }) - .toArray(); - - if (acls.length) { - for (i in acls) { - acl = acls[i]; - //console.log("user_perm:",acl); - perm_found |= acl.grant; - if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; - } - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - } - - // Evaluate group permissions on object - - if (a_object.acls && (a_object.acls & 2) !== 0) { - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, outbound member filter p.vertices[2]._id == @client return p.edges[0]", - { - object: a_object._id, - client: a_client._id, - }, - ) - .toArray(); - - if (acls.length) { - for (i in acls) { - acl = acls[i]; - //console.log("group_perm:",acl); - perm_found |= acl.grant; - if (a_inherited && acl.inhgrant) perm_found |= acl.inhgrant; - } - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - } - - // If not all requested permissions have been found, evaluate permissions inherited from parent collections - // Note that items can only be linked to containers that share the same owner - - var children = [a_object]; - var parents, parent; - - for (;;) { - // Find all parent collections owned by object owner - - parents = obj.db - ._query( - "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", - { - children: children, - }, - ) - .toArray(); - - if (parents.length == 0) break; - - for (i in parents) { - parent = parents[i]; - - if (parent.topic) { - perm_found |= obj.PERM_PUBLIC; - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - - // User ACL - if (parent.acls && 
(parent.acls & 1) != 0) { - acls = obj.db - ._query( - "for v, e in 1..1 outbound @object acl filter v._id == @client return e", - { - object: parent._id, - client: a_client._id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm_found |= acl.inhgrant; - } - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - } - - // Group ACL - if (parent.acls && (parent.acls & 2) != 0) { - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", - { - object: parent._id, - client: a_client._id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm_found |= acl.inhgrant; - } - - if ((a_req_perm & perm_found) == a_req_perm) return a_req_perm; - } - } - } - - // If there are still missing require permissions... - // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) - children = parents; - } - - return perm_found & a_req_perm; - }; - - obj.getPermissionsLocal = function (a_client_id, a_object, a_get_inherited, a_req_perm) { - var perm = { - grant: 0, - inhgrant: 0, - inherited: 0, - }, - acl, - acls, - i; - - //console.log("getPermissionsLocal",a_object._id); - - if (a_object.topic) { - //console.log("has topic 1"); - perm.grant |= obj.PERM_PUBLIC; - perm.inhgrant |= obj.PERM_PUBLIC; - } - - if (a_object.acls & 1) { - //console.log("chk local user acls"); - - acls = obj.db - ._query("for v, e in 1..1 outbound @object acl filter v._id == @client return e", { - object: a_object._id, - client: a_client_id, - }) - .toArray(); - - for (i in acls) { - acl = acls[i]; - perm.grant |= acl.grant; - perm.inhgrant |= acl.inhgrant; - } - } - - // Evaluate group permissions on object - if (a_object.acls & 2) { - //console.log("chk local group acls"); - - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, 
outbound member filter p.vertices[2]._id == @client return p.edges[0]", - { - object: a_object._id, - client: a_client_id, - }, - ) - .toArray(); - for (i in acls) { - acl = acls[i]; - perm.grant |= acl.grant; - perm.inhgrant |= acl.inhgrant; - } - } - - if (a_get_inherited) { - //console.log("chk inherited"); - - var children = [a_object]; - var parents, parent; - - for (;;) { - // Find all parent collections owned by object owner - - parents = obj.db - ._query( - "for i in @children for v in 1..1 inbound i item return {_id:v._id,topic:v.topic,acls:v.acls}", - { - children: children, - }, - ) - .toArray(); - - //console.log("parents",parents); - - if (parents.length == 0) break; - - for (i in parents) { - parent = parents[i]; - - if (parent.topic) { - //console.log("has topic 2"); - - perm.inherited |= obj.PERM_PUBLIC; - - if ((a_req_perm & perm.inherited) == a_req_perm) break; - } - - // User ACL - if (parent.acls && (parent.acls & 1) != 0) { - //console.log("chk par user acls"); - - acls = obj.db - ._query( - "for v, e in 1..1 outbound @object acl filter v._id == @client return e", - { - object: parent._id, - client: a_client_id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm.inherited |= acl.inhgrant; - } - - if ((a_req_perm & perm.inherited) == a_req_perm) break; - } - } - - // Group ACL - if (parent.acls && (parent.acls & 2) != 0) { - //console.log("chk par group acls"); - - acls = obj.db - ._query( - "for v, e, p in 2..2 outbound @object acl, outbound member filter is_same_collection('g',p.vertices[1]) and p.vertices[2]._id == @client return p.edges[0]", - { - object: parent._id, - client: a_client_id, - }, - ) - .toArray(); - if (acls.length) { - for (i in acls) { - acl = acls[i]; - perm.inherited |= acl.inhgrant; - } - - if ((a_req_perm & perm.inherited) == a_req_perm) break; - } - } - } - - // If there are still missing require permissions... 
- // Determine which parents are candidates for further evaluation (have req bits not set in inherited permissions) - children = parents; - } - } - - return perm; - }; - obj.getACLOwnersBySubject = function (subject, inc_users, inc_projects) { var results = []; @@ -2547,7 +1860,7 @@ module.exports = (function () { dep = deps.next(); if (dep == src) throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Circular dependency detected in references, from " + id, ]; obj.checkDependencies(dep, src ? src : id, depth + 1); @@ -2566,7 +1879,10 @@ module.exports = (function () { id = a_subj.id || a_subj._id; if (a_client) { - if (a_admin || (a_admin === undefined && obj.hasAdminPermObject(a_client, id))) { + if ( + a_admin || + (a_admin === undefined && permissions.hasAdminPermObject(a_client, id)) + ) { // Owner/admin - return notes that are open or active res = obj.db._query( "for n in 1..1 outbound @id note return {type:n.type,state:n.state,parent_id:n.parent_id}", @@ -2933,7 +2249,7 @@ module.exports = (function () { } else { //console.log( "save", id ); if (tag.length > 40) - throw [obj.ERR_INVALID_PARAM, "Tag too long (max 40 characters)."]; + throw [error.ERR_INVALID_PARAM, "Tag too long (max 40 characters)."]; for (j = 0; j < tag.length; j++) { code = tag.charCodeAt(j); @@ -2943,7 +2259,7 @@ module.exports = (function () { code !== 45 ) // "-" - throw [obj.ERR_INVALID_CHAR, "Invalid character(s) in tag."]; + throw [error.ERR_INVALID_CHAR, "Invalid character(s) in tag."]; } obj.db.tag.save({ @@ -3025,7 +2341,7 @@ module.exports = (function () { for (c in cur) { col = obj.db.c.document(cur[c]); - if (obj.hasAdminPermObject(a_client, col._id)) { + if (permissions.hasAdminPermObject(a_client, col._id)) { child = obj.db._query( "for i in 1..10 outbound @col item filter is_same_collection('c',i) return i._id", { @@ -3044,19 +2360,20 @@ module.exports = (function () { } } } else { - perm = obj.getPermissionsLocal( + perm = permissions.getPermissionsLocal( 
a_client._id, col, true, - obj.PERM_RD_REC | obj.PERM_LIST, + permissions.PERM_RD_REC | permissions.PERM_LIST, ); if ( perm.grant & - ((obj.PERM_RD_REC | obj.PERM_LIST) != (obj.PERM_RD_REC | obj.PERM_LIST)) + ((permissions.PERM_RD_REC | permissions.PERM_LIST) != + (permissions.PERM_RD_REC | permissions.PERM_LIST)) ) { throw [ - obj.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied for collection '" + col._id + "'", ]; } @@ -3067,7 +2384,8 @@ module.exports = (function () { if ( perm.inhgrant & - ((obj.PERM_RD_REC | obj.PERM_LIST) == (obj.PERM_RD_REC | obj.PERM_LIST)) + ((permissions.PERM_RD_REC | permissions.PERM_LIST) == + (permissions.PERM_RD_REC | permissions.PERM_LIST)) ) { child = obj.db._query( "for i in 1..10 outbound @col item filter is_same_collection('c',i) return i._id", @@ -3111,7 +2429,7 @@ module.exports = (function () { if (!a_cols.has(a_col_id)) { //console.log("expColl",a_col_id,"inh:",a_inh_perm); var col, res; - if (obj.hasAdminPermObject(a_client, a_col_id)) { + if (permissions.hasAdminPermObject(a_client, a_col_id)) { a_cols.add(a_col_id); //console.log("has admin"); res = obj.db._query( @@ -3129,23 +2447,23 @@ module.exports = (function () { } else { col = obj.db.c.document(a_col_id); - var perm = obj.getPermissionsLocal( + var perm = permissions.getPermissionsLocal( a_client._id, col, a_inh_perm == undefined ? 
true : false, - obj.PERM_RD_REC | obj.PERM_LIST, + permissions.PERM_RD_REC | permissions.PERM_LIST, ); //console.log("perm",perm); if ( ((perm.grant | perm.inherited | a_inh_perm) & - (obj.PERM_RD_REC | obj.PERM_LIST)) != - (obj.PERM_RD_REC | obj.PERM_LIST) + (permissions.PERM_RD_REC | permissions.PERM_LIST)) != + (permissions.PERM_RD_REC | permissions.PERM_LIST) ) { if (a_inh_perm == undefined) { // Only throw a PERM_DENIED error if this is one of the user-specified collections (not a child) throw [ - obj.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Permission denied for collection '" + col._id + "'", ]; } else { @@ -3160,8 +2478,8 @@ module.exports = (function () { if ( ((perm.inhgrant | perm.inherited | a_inh_perm) & - (obj.PERM_RD_REC | obj.PERM_LIST)) == - (obj.PERM_RD_REC | obj.PERM_LIST) + (permissions.PERM_RD_REC | permissions.PERM_LIST)) == + (permissions.PERM_RD_REC | permissions.PERM_LIST) ) { //console.log("have all inh perms"); @@ -3208,7 +2526,7 @@ module.exports = (function () { * * GLOBUS_TRANSFER: "|" * @returns {{}} Object containing the parsed key/values of the input other_token_data string. 
- * @throws obj.ERR_INVALAD_PARAM + * @throws error.ERR_INVALAD_PARAM * * @example * // returns { uuid: "1cbaaee5-b938-4a4e-87a8-f1ec4d5d92f9", scopes: "urn:globus:auth:scope:transfer.api.globus.org:all+email" } @@ -3223,13 +2541,16 @@ module.exports = (function () { // GLOBUS_TRANSFER parse currently assumes uuid and scopes exist, but this may change const parsed_data = other_token_data.split("|"); if (parsed_data.length !== 2) { - throw [obj.ERR_INVALID_PARAM, "Unexpected count of additional token data provided"]; + throw [ + error.ERR_INVALID_PARAM, + "Unexpected count of additional token data provided", + ]; } const parsed_uuid = parsed_data[0]; if (!obj.isUUID(parsed_uuid)) { throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Provided other_token_data does not follow format of '|'", ]; } @@ -3237,7 +2558,7 @@ module.exports = (function () { if (!parsed_scopes.includes("transfer.api.globus.org")) { // TODO: does this need validation, and is this validation sufficient? throw [ - obj.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Scopes included in other_token_data do not refer to transfer resource, but transfer resource was specified", ]; } diff --git a/core/database/foxx/api/tag_router.js b/core/database/foxx/api/tag_router.js index 3dfb28d41..575d04f3a 100644 --- a/core/database/foxx/api/tag_router.js +++ b/core/database/foxx/api/tag_router.js @@ -2,36 +2,53 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); +const error = require("./lib/error_codes"); const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const logger = require("./lib/logger"); +const basePath = "tag"; module.exports = router; //==================== TAG API FUNCTIONS router .post("/search", function (req, res) { + let client = null; + let result = null; + let tot = null; try { + client = req.queryParams.client + ? 
g_lib.getUserFromClientID(req.queryParams.client) + : null; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/search", + status: "Started", + description: `Search for tags by name (${req.queryParams?.name?.trim()})`, + }); var name = req.queryParams.name.trim(); if (name.length < 3) - throw [g_lib.ERR_INVALID_PARAM, "Input is too short for tag search."]; + throw [error.ERR_INVALID_PARAM, "Input is too short for tag search."]; - var off = req.queryParams.offset ? req.queryParams.offset : 0, - cnt = req.queryParams.count ? req.queryParams.count : 50, - result = g_db._query( - "for t in tagview search analyzer(t._key in tokens(@name,'tag_name'), 'tag_name') let s = BM25(t) sort s desc limit @off,@cnt return {name: t._key, count: t.count}", - { - name: name, - off: off, - cnt: cnt, - }, - { - fullCount: true, - }, - ), - tot = result.getExtra().stats.fullCount; + var off = req.queryParams.offset ? req.queryParams.offset : 0; + var cnt = req.queryParams.count ? 
req.queryParams.count : 50; + result = g_db._query( + "for t in tagview search analyzer(t._key in tokens(@name,'tag_name'), 'tag_name') let s = BM25(t) sort s desc limit @off,@cnt return {name: t._key, count: t.count}", + { + name: name, + off: off, + cnt: cnt, + }, + { + fullCount: true, + }, + ); + tot = result.getExtra().stats.fullCount; result = result.toArray(); result.push({ @@ -43,7 +60,34 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/search", + status: "Success", + description: `Search for tags by name(${req.queryParams?.name?.trim()})`, + extra: { + requestedName: name, + returnedCount: result?.length - 1, // subtract the paging object + total_found: tot, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/search", + status: "Failure", + description: `Search for tags by name(${req.queryParams?.name?.trim()})`, + extra: { + requestedName: name, + returnedCount: result?.length - 1, // subtract the paging object + total_found: tot, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -55,7 +99,21 @@ router router .post("/list/by_count", function (req, res) { + let client = null; + let tot = null; try { + client = req.queryParams.client + ? 
g_lib.getUserFromClientID(req.queryParams.client) + : null; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/list/by_count", + status: "Started", + description: "List tags by count", + }); + g_db._executeTransaction({ collections: { read: ["tag"], @@ -75,7 +133,7 @@ router }, ); - var tot = result.getExtra().stats.fullCount; + tot = result.getExtra().stats.fullCount; result = result.toArray(); result.push({ paging: { @@ -86,9 +144,29 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/list/by_count", + status: "Success", + description: "List tags by count", + extra: { total_tags: tot }, + }); }, }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/list/by_count", + status: "Failure", + description: "List tags by count", + extra: { total_tags: tot }, + error: e, + }); + g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/api/task_router.js b/core/database/foxx/api/task_router.js index e4f96922b..292beb922 100644 --- a/core/database/foxx/api/task_router.js +++ b/core/database/foxx/api/task_router.js @@ -5,7 +5,10 @@ const router = createRouter(); const joi = require("joi"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const error = require("./lib/error_codes"); const g_tasks = require("./tasks"); +const logger = require("./lib/logger"); +const basePath = "task"; module.exports = router; @@ -13,16 +16,25 @@ module.exports = router; router .get("/view", function (req, res) { + let task = null; try { + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + 
description: "View an existing task record", + }); if (!g_db._exists(req.queryParams.task_id)) { // WARNING - do not change this error message it is acted on by the task worker throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Task " + req.queryParams.task_id + " does not exist.", ]; } - var task = g_db.task.document(req.queryParams.task_id); + task = g_db.task.document(req.queryParams.task_id); var blocks = g_db.block.byExample({ _from: req.queryParams.task_id, }); @@ -42,8 +54,30 @@ router delete task._rev; delete task._key; + const { state, ...partial_task } = task; + res.send([task]); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: "View an existing task record", + extra: partial_task, + }); } catch (e) { + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: "View an existing task record", + extra: task, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -54,10 +88,18 @@ router router .get("/run", function (req, res) { var task, run_func; - - //console.log("task/run - trans 1"); + let result = null; try { + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/run", + status: "Started", + description: "Run task", + }); + g_db._executeTransaction({ collections: { read: [], @@ -68,7 +110,7 @@ router action: function () { if (!g_db.task.exists(req.queryParams.task_id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Task " + req.queryParams.task_id + " does not exist.", ]; @@ -84,19 +126,12 @@ router if (task.status == g_lib.TS_READY) { g_tasks.taskReady(task._id); } else if (task.status == g_lib.TS_RUNNING) { - 
console.log("task/run: ", task._id, " - step is: ", req.queryParams.step); if ( req.queryParams.step != undefined && req.queryParams.step == task.step ) { // This confirms previous step was completed, so update step number task.step++; - console.log( - "task/run: ", - task._id, - " - step after incrementing is: ", - task.step, - ); g_db.task.update( task._id, { @@ -112,7 +147,7 @@ router req.queryParams.step >= task.steps ) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Called run on task " + task._id + " with invalid step: " + @@ -121,7 +156,7 @@ router } } else { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Called run on task " + task._id + " with incorrect status: " + @@ -131,10 +166,6 @@ router }, }); - //console.log("task/run - call handler" ); - - var result; - for (;;) { try { if (req.queryParams.err_msg) { @@ -144,7 +175,6 @@ router result = run_func.call(g_tasks, task); // An empty result means rollback has completed without additional errors if (!result) { - //console.log("Task run handler stopped rollback" ); result = { cmd: g_lib.TC_STOP, params: g_tasks.taskComplete(task._id, false, task.error), @@ -155,12 +185,9 @@ router var err = Array.isArray(e) ? 
e[1] : e; if (err.errorMessage) err = err.errorMessage; - console.log("Task run handler exception: " + err); - // Load current task and check step # task = g_db.task.document(task._id); if (task.step > 0) { - //console.log("First exception" ); // Exception on processing, start roll-back task.step = -task.step; task.error = String(err); @@ -176,7 +203,6 @@ router }, ); } else { - console.log("Exception in rollback"); // Exception on roll-back, abort and return next tasks to process result = { cmd: g_lib.TC_STOP, @@ -193,9 +219,28 @@ router } } - //console.log("task/run return"); res.send(result); + task = g_db.task.document(req.queryParams.task_id); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/run", + status: "Success", + description: "Run task", + extra: { type: task?.type }, + }); } catch (e) { + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/run", + status: "Failure", + description: "Run task", + extra: { type: task?.type }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -221,8 +266,18 @@ router */ router .post("/abort", function (req, res) { + let result = null; try { - var result = []; + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/abort", + status: "Started", + description: "Abort a schedule task", + }); + + result = []; g_db._executeTransaction({ collections: { read: [], @@ -232,7 +287,7 @@ router action: function () { if (!g_db._exists(req.queryParams.task_id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Task " + req.queryParams.task_id + " does not exist.", ]; @@ -241,7 +296,26 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: 
req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/abort", + status: "Success", + description: "Abort a schedule task", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/abort", + status: "Failure", + description: "Abort a schedule task", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -253,19 +327,47 @@ router router .post("/delete", function (req, res) { try { + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Started", + description: "Delete an existing task record", + }); + if (!g_db._exists(req.queryParams.task_id)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Task " + req.queryParams.task_id + " does not exist.", ]; var task = g_db.task.document(req.queryParams.task_id); if (task.status < g_lib.TS_SUCCEEDED) - throw [g_lib.ERR_IN_USE, "Cannot delete task that is still scheduled."]; + throw [error.ERR_IN_USE, "Cannot delete task that is still scheduled."]; g_lib.graph.task.remove(req.queryParams.task_id); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Success", + description: "Delete an existing task record", + extra: req.queryParams.task_id, + }); } catch (e) { g_lib.handleException(e, res); + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Failure", + description: "Delete an existing task record", + extra: "undefined", + error: e, + }); } }) .queryParam("task_id", joi.string().required(), "Task ID") @@ -274,8 +376,17 @@ router router .get("/list", 
function (req, res) { + let result = null; try { const client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Started", + description: "List task records", + }); var params = { client: client._id, @@ -307,10 +418,32 @@ router qry += " return i"; - var result = g_db._query(qry, params); + result = g_db._query(qry, params); res.send(result); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Success", + description: "List task records", + extra: { + queryParams: req.queryParams, + _countTotal: result._countTotal, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Failure", + description: "List task records", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -334,8 +467,17 @@ router router .get("/reload", function (req, res) { + let result = null; try { - var result = []; + result = []; + logger.logRequestStarted({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/reload", + status: "Started", + description: "Reload ready/running task records", + }); g_db._executeTransaction({ collections: { @@ -352,7 +494,26 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req?.queryParams?.task_id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/reload", + status: "Success", + description: "Reload ready/running task records", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req?.queryParams?.task_id, + correlationId: 
req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/reload", + status: "Failure", + description: "Reload ready/running task records", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -362,6 +523,15 @@ router router .get("/purge", function (req, res) { try { + logger.logRequestStarted({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Started", + description: "Purge tasks", + }); + g_db._executeTransaction({ collections: { read: [], @@ -379,7 +549,26 @@ router ); }, }); + logger.logRequestSuccess({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Success", + description: "Purge tasks", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Failure", + description: "Purge tasks", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/api/tasks.js b/core/database/foxx/api/tasks.js index cc2de4e42..8e7ac603d 100644 --- a/core/database/foxx/api/tasks.js +++ b/core/database/foxx/api/tasks.js @@ -2,11 +2,14 @@ // local imports const g_lib = require("./support"); +const error = require("./lib/error_codes"); const { UserToken } = require("./lib/user_token"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); const g_proc = require("./process"); +const permissions = require("./lib/permissions"); + var g_internal = require("internal"); var tasks_func = (function () { @@ -25,13 +28,13 @@ var tasks_func = (function () { // Check if repo and subject exist if (!g_db._exists(a_repo_id)) - throw [g_lib.ERR_NOT_FOUND, "Repo, '" + a_repo_id + "', does not exist"]; + throw [error.ERR_NOT_FOUND, "Repo, '" + 
a_repo_id + "', does not exist"]; if (!g_db._exists(a_subject_id)) - throw [g_lib.ERR_NOT_FOUND, "Subject, '" + a_subject_id + "', does not exist"]; + throw [error.ERR_NOT_FOUND, "Subject, '" + a_subject_id + "', does not exist"]; // Check for proper permissions - g_lib.ensureAdminPermRepo(a_client, a_repo_id); + permissions.ensureAdminPermRepo(a_client, a_repo_id); // Check if there is already a matching allocation var alloc = g_db.alloc.firstExample({ @@ -40,7 +43,7 @@ var tasks_func = (function () { }); if (alloc) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Subject, '" + a_subject_id + "', already has as allocation on " + a_repo_id, ]; @@ -55,7 +58,7 @@ var tasks_func = (function () { ); if (res.hasNext()) { - throw [g_lib.ERR_IN_USE, "A duplicate allocation create task was found."]; + throw [error.ERR_IN_USE, "A duplicate allocation create task was found: " + res.next()]; } var repo = g_db.repo.document(a_repo_id); @@ -72,7 +75,6 @@ var tasks_func = (function () { repo_path: path, }; var task = obj._createTask(a_client._id, g_lib.TT_ALLOC_CREATE, 2, state); - if ( g_proc._lockDepsGeneral(task._id, [ { @@ -98,7 +100,6 @@ var tasks_func = (function () { }, ).new; } - return { task: task, }; @@ -158,14 +159,14 @@ var tasks_func = (function () { console.log("taskInitAllocDelete"); if (!g_db._exists(a_repo_id)) - throw [g_lib.ERR_NOT_FOUND, "Repo, '" + a_repo_id + "', does not exist"]; + throw [error.ERR_NOT_FOUND, "Repo, '" + a_repo_id + "', does not exist"]; if (!g_db._exists(a_subject_id)) - throw [g_lib.ERR_NOT_FOUND, "Subject, '" + a_subject_id + "', does not exist"]; + throw [error.ERR_NOT_FOUND, "Subject, '" + a_subject_id + "', does not exist"]; var repo = g_db.repo.document(a_repo_id); - g_lib.ensureAdminPermRepo(a_client, a_repo_id); + permissions.ensureAdminPermRepo(a_client, a_repo_id); var alloc = g_db.alloc.firstExample({ _from: a_subject_id, @@ -173,7 +174,7 @@ var tasks_func = (function () { }); if (!alloc) throw [ - 
g_lib.ERR_NOT_FOUND, + error.ERR_NOT_FOUND, "Subject, '" + a_subject_id + "', has no allocation on " + a_repo_id, ]; @@ -186,7 +187,7 @@ var tasks_func = (function () { }, ) .next(); - if (count) throw [g_lib.ERR_IN_USE, "Cannot delete allocation - records present"]; + if (count) throw [error.ERR_IN_USE, "Cannot delete allocation - records present"]; // Check if there is an existing alloc task to involving the same allocation (repo + subject) var res = g_db._query( @@ -199,7 +200,7 @@ var tasks_func = (function () { ); if (res.hasNext()) { - throw [g_lib.ERR_IN_USE, "A duplicate allocation delete task was found."]; + throw [error.ERR_IN_USE, "A duplicate allocation delete task was found: " + res.next()]; } var path = @@ -311,7 +312,7 @@ var tasks_func = (function () { if (result.glob_data.length + result.ext_data.length > 0 && !a_check) { var idx = a_path.indexOf("/"); if (idx == -1) - throw [g_lib.ERR_INVALID_PARAM, "Invalid destination path (must include endpoint)"]; + throw [error.ERR_INVALID_PARAM, "Invalid destination path (must include endpoint)"]; // Check for duplicate names if (a_orig_fname) { @@ -324,7 +325,7 @@ var tasks_func = (function () { ); if (fnames.has(fname)) { throw [ - g_lib.ERR_XFR_CONFLICT, + error.ERR_XFR_CONFLICT, "Duplicate filename(s) detected in transfer request.", ]; } @@ -338,7 +339,7 @@ var tasks_func = (function () { ); if (fnames.has(fname)) { throw [ - g_lib.ERR_XFR_CONFLICT, + error.ERR_XFR_CONFLICT, "Duplicate filename(s) detected in transfer request.", ]; } @@ -495,7 +496,7 @@ var tasks_func = (function () { if (result.glob_data.length > 0 && !a_check) { var idx = a_path.indexOf("/"); if (idx == -1) - throw [g_lib.ERR_INVALID_PARAM, "Invalid destination path (must include endpoint)"]; + throw [error.ERR_INVALID_PARAM, "Invalid destination path (must include endpoint)"]; var state = { path: a_path, @@ -702,10 +703,10 @@ var tasks_func = (function () { if (a_proj_id) { if (!g_db.p.exists(a_proj_id)) - throw 
[g_lib.ERR_INVALID_PARAM, "Project '" + a_proj_id + "' does not exist."]; + throw [error.ERR_INVALID_PARAM, "Project '" + a_proj_id + "' does not exist."]; - if (!g_lib.hasManagerPermProj(a_client, a_proj_id)) - throw [g_lib.ERR_PERM_DENIED, "Operation requires admin permissions to project."]; + if (!permissions.hasManagerPermProj(a_client, a_proj_id)) + throw [error.ERR_PERM_DENIED, "Operation requires admin permissions to project."]; owner_id = a_proj_id; } else { @@ -714,14 +715,14 @@ var tasks_func = (function () { // Verify destination repo if (!g_db.repo.exists(a_dst_repo_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such repo '" + a_dst_repo_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such repo '" + a_dst_repo_id + "'"]; // Verify client/owner has an allocation var alloc = g_db.alloc.firstExample({ _from: owner_id, _to: a_dst_repo_id, }); - if (!alloc) throw [g_lib.ERR_INVALID_PARAM, "No allocation on '" + a_dst_repo_id + "'"]; + if (!alloc) throw [error.ERR_INVALID_PARAM, "No allocation on '" + a_dst_repo_id + "'"]; var result = g_proc.preprocessItems( { @@ -884,13 +885,13 @@ var tasks_func = (function () { }); if (alloc.rec_count + xfr.files.length > alloc.rec_limit) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Allocation record count limit exceeded on " + state.dst_repo_id, ]; if (alloc.data_size + xfr.size > alloc.data_limit) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Allocation data size limit exceeded on " + state.dst_repo_id, ]; @@ -898,7 +899,7 @@ var tasks_func = (function () { obj.recMoveInit(xfr.files, state.dst_repo_id); // TEST ONLY - //throw [g_lib.ERR_INTERNAL_FAULT,"TEST ONLY ERROR"]; + //throw [error.ERR_INTERNAL_FAULT,"TEST ONLY ERROR"]; // Update task step a_task.step += 1; @@ -1006,19 +1007,19 @@ var tasks_func = (function () { // Verify destination collection if (!g_db.c.exists(a_dst_coll_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such collection '" + a_dst_coll_id + "'"]; + throw [error.ERR_INVALID_PARAM, 
"No such collection '" + a_dst_coll_id + "'"]; var owner_id = g_db.owner.firstExample({ _from: a_dst_coll_id, })._to; if (owner_id != a_client._id) { - if (owner_id.charAt(0) != "p" || !g_lib.hasManagerPermProj(a_client, owner_id)) { + if (owner_id.charAt(0) != "p" || !permissions.hasManagerPermProj(a_client, owner_id)) { var coll = g_db.c.document(a_dst_coll_id); - if (!g_lib.hasPermissions(a_client, coll, g_lib.PERM_CREATE)) + if (!permissions.hasPermissions(a_client, coll, permissions.PERM_CREATE)) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Operation requires CREATE permission on destination collection '" + a_dst_coll_id + "'", @@ -1036,13 +1037,13 @@ var tasks_func = (function () { }) .toArray(); if (!allocs.length) - throw [g_lib.ERR_PERM_DENIED, "No allocations available for '" + owner_id + "'"]; + throw [error.ERR_PERM_DENIED, "No allocations available for '" + owner_id + "'"]; g_lib.sortAllocations(allocs); } else { // Verify destination repo if (!g_db.repo.exists(a_dst_repo_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such repo '" + a_dst_repo_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such repo '" + a_dst_repo_id + "'"]; // Verify client/owner has an allocation if ( @@ -1051,14 +1052,14 @@ var tasks_func = (function () { _to: a_dst_repo_id, }) ) - throw [g_lib.ERR_INVALID_PARAM, "No allocation on '" + a_dst_repo_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No allocation on '" + a_dst_repo_id + "'"]; } var result = g_proc.preprocessItems(a_client, owner_id, a_res_ids, g_lib.TT_REC_OWNER_CHG); if (result.has_pub) { throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Owner change not allowed - selection contains public data.", ]; } @@ -1269,13 +1270,13 @@ var tasks_func = (function () { }); if (alloc.rec_count + xfr.files.length > alloc.rec_limit) throw [ - g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Allocation record count limit exceeded on " + state.dst_repo_id, ]; if (alloc.data_size + xfr.size > alloc.data_limit) throw [ 
- g_lib.ERR_PERM_DENIED, + error.ERR_PERM_DENIED, "Allocation data size limit exceeded on " + state.dst_repo_id, ]; @@ -1380,7 +1381,7 @@ var tasks_func = (function () { var result = g_proc.preprocessItems(a_client, null, a_ids, g_lib.TT_REC_DEL); if (result.has_pub) { - throw [g_lib.ERR_PERM_DENIED, "Deletion not allowed - selection contains public data."]; + throw [error.ERR_PERM_DENIED, "Deletion not allowed - selection contains public data."]; } var i, @@ -1570,9 +1571,9 @@ var tasks_func = (function () { for (i in a_proj_ids) { proj_id = a_proj_ids[i]; if (!g_db.p.exists(proj_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such project '" + proj_id + "'"]; + throw [error.ERR_INVALID_PARAM, "No such project '" + proj_id + "'"]; - g_lib.ensureAdminPermProj(a_client, proj_id); + permissions.ensureAdminPermProj(a_client, proj_id); } obj._ensureExclusiveAccess(a_proj_ids); @@ -1728,7 +1729,7 @@ var tasks_func = (function () { case g_lib.TT_PROJ_DEL: return obj.taskRunProjDelete; default: - throw [g_lib.ERR_INVALID_PARAM, "Invalid task type: " + a_task.type]; + throw [error.ERR_INVALID_PARAM, "Invalid task type: " + a_task.type]; } }; @@ -1908,7 +1909,7 @@ var tasks_func = (function () { if (a_mode == g_lib.TT_DATA_GET || a_mode == g_lib.TT_DATA_PUT) { idx = a_remote.indexOf("/"); if (idx < 1) - throw [g_lib.ERR_INVALID_PARAM, "Invalid remote path (must include endpoint)"]; + throw [error.ERR_INVALID_PARAM, "Invalid remote path (must include endpoint)"]; //console.log("rem idx:",idx); @@ -1972,7 +1973,7 @@ var tasks_func = (function () { file.to = loc.d_src.substr(loc.d_src.lastIndexOf("/") + 1); if (fnames.has(file.to)) { throw [ - g_lib.ERR_XFR_CONFLICT, + error.ERR_XFR_CONFLICT, "Duplicate filename(s) detected in transfer request.", ]; } @@ -2037,7 +2038,7 @@ var tasks_func = (function () { idx = edat.source.indexOf("/"); if (idx < 0) { - throw [g_lib.ERR_INVALID_PARAM, "Invalid external source path: " + edat.source]; + throw [error.ERR_INVALID_PARAM, "Invalid 
external source path: " + edat.source]; } ep = edat.source.substr(0, idx); src = edat.source.substr(idx); @@ -2046,14 +2047,14 @@ var tasks_func = (function () { idx = src.lastIndexOf("/"); if (idx < 0) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid external source path: " + edat.source, ]; } file.to = src.substr(idx + 1); if (fnames.has(file.to)) { throw [ - g_lib.ERR_XFR_CONFLICT, + error.ERR_XFR_CONFLICT, "Duplicate filename(s) detected in transfer request.", ]; } @@ -2551,13 +2552,13 @@ var tasks_func = (function () { // DEV-ONLY SANITY CHECKS: if (!loc.new_coll) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Record '" + data.id + "' missing destination collection!", ]; if (!g_db.c.exists(loc.new_coll)) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Record '" + data.id + "' destination collection '" + @@ -2569,7 +2570,7 @@ var tasks_func = (function () { if (coll.owner != loc.new_owner) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Record '" + data.id + "' destination collection '" + @@ -2656,7 +2657,7 @@ var tasks_func = (function () { }); if (!alloc) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Record '" + data.id + "' has mismatched allocation/location (cur)!", ]; @@ -2677,7 +2678,7 @@ var tasks_func = (function () { }); if (!alloc) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Record '" + data.id + "' has mismatched allocation/location (new)!", ]; @@ -2702,7 +2703,7 @@ var tasks_func = (function () { obj.recMoveExt = function (a_data, a_dst_owner_id, a_dst_coll_id) { if (!g_db.c.exists(a_dst_coll_id)) { throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Destination collection '" + a_dst_coll_id + "' does not exist!", ]; } @@ -2716,7 +2717,7 @@ var tasks_func = (function () { if (coll.owner != a_dst_owner_id) throw [ - g_lib.ERR_INTERNAL_FAULT, + error.ERR_INTERNAL_FAULT, "Destination collection '" + a_dst_coll_id + "' not 
owned by new owner!", ]; @@ -2804,7 +2805,7 @@ var tasks_func = (function () { _to: id, }); if (lock) - throw [g_lib.ERR_PERM_DENIED, "Operation not permitted - '" + id + "' in use."]; + throw [error.ERR_PERM_DENIED, "Operation not permitted - '" + id + "' in use."]; } //console.log("_ensureExclusiveAccess done", Date.now()); }; diff --git a/core/database/foxx/api/topic_router.js b/core/database/foxx/api/topic_router.js index c504d9c4b..38bc07866 100644 --- a/core/database/foxx/api/topic_router.js +++ b/core/database/foxx/api/topic_router.js @@ -3,6 +3,7 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); const joi = require("joi"); +const error = require("./lib/error_codes"); const g_db = require("@arangodb").db; const g_lib = require("./support"); @@ -72,7 +73,7 @@ router .get("/view", function (req, res) { try { if (!g_db.t.exists(req.queryParams.id)) - throw [g_lib.ERR_NOT_FOUND, "Topic, " + req.queryParams.id + ", not found"]; + throw [error.ERR_NOT_FOUND, "Topic, " + req.queryParams.id + ", not found"]; var topic = g_db.t.document(req.queryParams.id); @@ -102,7 +103,7 @@ router path, op = false; - if (tokens.length == 0) throw [g_lib.ERR_INVALID_PARAM, "Invalid topic search phrase."]; + if (tokens.length == 0) throw [error.ERR_INVALID_PARAM, "Invalid topic search phrase."]; it = 0; for (i in tokens) { diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 6738bf5b1..95cd4d368 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -8,8 +8,12 @@ const auth = createAuth("pbkdf2"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); const g_lib = require("./support"); +const error = require("./lib/error_codes"); +const permissions = require("./lib/permissions"); const { UserToken } = require("./lib/user_token"); const { UserModel } = require("./models/user"); +const logger = 
require("./lib/logger"); +const basePath = "usr"; module.exports = router; @@ -17,22 +21,47 @@ module.exports = router; router .get("/authn/password", function (req, res) { - console.log("Running /authn/password"); + let client = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); const is_verified = auth.verify(client.password, req.queryParams.pw); if (is_verified === false) { - throw g_lib.ERR_AUTHN_FAILED; + throw error.ERR_AUTHN_FAILED; } - //if ( client.password != req.queryParams.pw ) - // throw g_lib.ERR_AUTHN_FAILED; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/password", + status: "Started", + description: "Authenticating user via password", + }); res.send({ uid: client._id, authorized: true, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/password", + status: "Success", + description: "Authenticating user via password", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/password", + status: "Failure", + description: "Authenticating user via password", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -43,17 +72,48 @@ router router .get("/authn/token", function (req, res) { + let user = null; try { - var user = g_db._query("for i in u filter i.access == @tok return i", { + user = g_db._query("for i in u filter i.access == @tok return i", { tok: req.queryParams.token, }); - if (!user.hasNext()) throw g_lib.ERR_AUTHN_FAILED; + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/token", + 
status: "Started", + description: "Authenticating user via access token", + }); + + if (!user.hasNext()) throw error.ERR_AUTHN_FAILED; res.send({ uid: user.next()._id, authorized: true, }); + + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/token", + status: "Success", + description: "Authenticating user via access token", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/token", + status: "Failure", + description: "Authenticating user via access token", + extra: "undefined", + error: e, + }); + g_lib.handleException(e, res); } }) @@ -63,9 +123,9 @@ router router .get("/create", function (req, res) { + let user = null; + let result = null; try { - var result; - g_db._executeTransaction({ collections: { read: ["u"], @@ -79,7 +139,7 @@ router if (idx < 1) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "ERROR: invalid user name (no first/last name) " + name, ]; @@ -120,9 +180,18 @@ router user_data.options = req.queryParams.options; } - var user = g_db.u.save(user_data, { + user = g_db.u.save(user_data, { returnNew: true, }); + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Started", + description: "Create new user entry", + }); + var root = g_db.c.save( { _key: "u_" + req.queryParams.uid + "_root", @@ -174,7 +243,7 @@ router }) ) throw [ - g_lib.ERR_IN_USE, + error.ERR_IN_USE, "ERROR: linked identity value, " + uuid + ", already in use", ]; @@ -201,11 +270,30 @@ router delete user.new.name; result = [user.new]; + delete user.new.password; }, }); - res.send(result); + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: 
basePath + "/create", + status: "Success", + description: "Create new user entry", + extra: user.new.uid, + }); } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Failure", + description: "Create new user entry", + extra: user.new.uid, + error: e, + }); g_lib.handleException(e, res); } }) @@ -226,23 +314,31 @@ router router .get("/update", function (req, res) { + let client = null; + let result = null; try { - var result; - g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], write: ["u", "admin"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var user_id; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: "Update user information", + }); if (req.queryParams.subject) { user_id = req.queryParams.subject; if (!g_db.u.exists(user_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; - g_lib.ensureAdminPermUser(client, user_id); + throw [error.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; + permissions.ensureAdminPermUser(client, user_id); } else { user_id = client._id; } @@ -260,7 +356,7 @@ router if (idx < 1) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid user name (no first/last name) " + req.queryParams.name, ]; } @@ -297,9 +393,26 @@ router result = [user.new]; }, }); - - res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: "Update user information", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: 
req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Failure", + description: "Update user information", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -315,6 +428,7 @@ router router .get("/find/by_uuids", function (req, res) { + let user = null; try { // Convert UUIDs to DB _ids var uuids = []; @@ -322,8 +436,15 @@ router uuids.push("uuid/" + req.queryParams.uuids[i]); } - var user = g_lib.findUserFromUUIDs(uuids); - + user = g_lib.findUserFromUUIDs(uuids); + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_uuids", + status: "Started", + description: "Find a user from list of UUIDs", + }); var idents = g_db ._query("for v in 1..1 outbound @user ident return v._key", { user: user._id, @@ -341,7 +462,26 @@ router delete user.name; res.send([user]); + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_uuids", + status: "Success", + description: "Find a user from list of UUIDs", + extra: req.queryParams.uuids, + }); } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_uuids", + status: "Failure", + description: "Find a user from list of UUIDs", + extra: req.queryParams.uuids, + error: e, + }); g_lib.handleException(e, res); } }) @@ -351,10 +491,20 @@ router router .get("/find/by_name_uid", function (req, res) { + let name = null; try { - var name = req.queryParams.name_uid.trim(); + name = req.queryParams.name_uid.trim(); + logger.logRequestStarted({ + client: name, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_name_uid", + status: "Started", + description: "Find users matching partial name and/or uid", + }); + if (name.length < 2) 
- throw [g_lib.ERR_INVALID_PARAM, "Input is too short for name/uid search."]; + throw [error.ERR_INVALID_PARAM, "Input is too short for name/uid search."]; else if (name.length < 3) name = " " + name + " "; // Pad to allow matches for very short first/last names (i.e. Bo, Li, Xi) var off = req.queryParams.offset ? req.queryParams.offset : 0, @@ -384,7 +534,26 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: name, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_name_uid", + status: "Success", + description: "Find users matching partial name and/or uid", + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: name, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_name_uid", + status: "Failure", + description: "Find users matching partial name and/or uid", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -396,6 +565,7 @@ router router .get("/keys/set", function (req, res) { + let client = null; try { g_db._executeTransaction({ collections: { @@ -403,14 +573,24 @@ router write: ["u"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/set", + status: "Started", + description: "Set user public and private keys", + }); + var user_id; if (req.queryParams.subject) { user_id = req.queryParams.subject; if (!g_db.u.exists(user_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; - g_lib.ensureAdminPermUser(client, user_id); + throw [error.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; + permissions.ensureAdminPermUser(client, user_id); } else { user_id = client._id; } @@ -424,7 +604,27 @@ router }); }, }); + + 
logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/set", + status: "Success", + description: "Set user public and private keys", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/set", + status: "Failure", + description: "Set user public and private keys", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -437,6 +637,7 @@ router router .get("/keys/clear", function (req, res) { + let client = null; try { g_db._executeTransaction({ collections: { @@ -444,14 +645,23 @@ router write: ["u"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/clear", + status: "Started", + description: "Clear user public and private keys", + }); + var user_id; if (req.queryParams.subject) { user_id = req.queryParams.subject; if (!g_db.u.exists(user_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; - g_lib.ensureAdminPermUser(client, user_id); + throw [error.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; + permissions.ensureAdminPermUser(client, user_id); } else { user_id = client._id; } @@ -465,7 +675,26 @@ router }); }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/clear", + status: "Success", + description: "Clear user public and private keys", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + 
"/keys/clear", + status: "Failure", + description: "Clear user public and private keys", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -476,30 +705,46 @@ router router .get("/keys/get", function (req, res) { + let user = null; try { - var user; - if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; user = g_db.u.document({ _id: req.queryParams.subject, }); + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/get", + status: "Started", + description: "Get user public and private keys", + }); } else { user = g_lib.getUserFromClientID(req.queryParams.client); } - if (!user.pub_key || !user.priv_key) + if (!user.pub_key || !user.priv_key) { res.send([ { uid: user._id, }, ]); - else + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/get", + status: "Success", + description: "Get user public and private keys", + extra: "undefined", + }); + } else { res.send([ { uid: user._id, @@ -507,7 +752,27 @@ router priv_key: user.priv_key, }, ]); + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/get", + status: "Success", + description: "Get user public and private keys", + extra: "undefined", + }); + } } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/get", + status: "Failure", + description: "Get user public and private keys", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -518,10 +783,38 @@ router router .get("/find/by_pub_key", function (req, res) { + let uid = null; try { - 
var uid = g_lib.uidFromPubKey(req.queryParams.pub_key); + uid = g_lib.uidFromPubKey(req.queryParams.pub_key); + logger.logRequestStarted({ + client: uid, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_pub_key", + status: "Started", + description: "Find a user by public key", + }); res.send(uid); + logger.logRequestSuccess({ + client: uid, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_pub_key", + status: "Success", + description: "Find a user by public key", + extra: uid, + }); } catch (e) { + logger.logRequestFailure({ + client: uid, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/find/by_pub_key", + status: "Failure", + description: "Find a user by public key", + extra: uid, + error: e, + }); g_lib.handleException(e, res); } }) @@ -531,6 +824,7 @@ router router .get("/token/set", function (req, res) { + let client = null; try { g_db._executeTransaction({ collections: { @@ -538,7 +832,17 @@ router write: ["u", "globus_coll", "globus_token"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/set", + status: "Started", + description: "Setting user token", + }); + var user_id; let user_doc; @@ -549,7 +853,7 @@ router token_type === g_lib.AccessTokenType.GLOBUS_DEFAULT ) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid parameters passed, the default action cannot process other_token_data.", ]; } else if ( @@ -557,7 +861,7 @@ router token_type !== g_lib.AccessTokenType.GLOBUS_DEFAULT ) { throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid parameters passed, type and other_token_data depend on one another.", ]; } @@ 
-565,21 +869,13 @@ router if (req.queryParams.subject) { user_id = req.queryParams.subject; if (!g_db.u.exists(user_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; - g_lib.ensureAdminPermUser(client, user_id); + throw [error.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; + permissions.ensureAdminPermUser(client, user_id); user_doc = g_db.u.document(user_id); } else { user_id = client._id; user_doc = client; } - console.log( - "updating tokens for", - user_id, - "acc:", - req.queryParams.access, - "exp:", - req.queryParams.expires_in, - ); var obj = { access: req.queryParams.access, refresh: req.queryParams.refresh, @@ -639,7 +935,6 @@ router ...obj, }; - console.log("writing to edge ", token_doc); const token_doc_upsert = g_db.globus_token.insert(token_doc, { overwriteMode: "replace", // TODO: perhaps use 'update' and specify values for true upsert. }); @@ -653,9 +948,32 @@ router break; } } + const tokenTypeName = Object.keys(g_lib.AccessTokenType).find( + (key) => g_lib.AccessTokenType[key] === token_type, + ); + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/set", + status: "Success", + description: "Setting user token", + extra: `${tokenTypeName} (${token_type})`, + }); }, }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/set", + status: "Failure", + description: "Setting user tokens", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -687,17 +1005,16 @@ router router .get("/token/get", function (req, res) { + let user = null; try { const collection_token = UserToken.validateRequestParams(req.queryParams); // TODO: collection type determines logic when mapped vs HA const { collection_id, collection_type } = req.queryParams; - var user; - if (req.queryParams.subject) { if 
(!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; @@ -708,6 +1025,15 @@ router user = g_lib.getUserFromClientID(req.queryParams.client); } + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get", + status: "Started", + description: "Getting user token", + }); + const user_token = new UserToken({ user_id: user._id, globus_collection_id: collection_id, @@ -725,7 +1051,25 @@ router ); res.send(result); + logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get", + status: "Success", + description: "Getting user token", + }); } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get", + status: "Failure", + description: "Getting user tokens", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) @@ -746,13 +1090,12 @@ router router .get("/token/get/access", function (req, res) { + let user = null; try { - var user; - if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; user = g_db.u.document({ @@ -761,11 +1104,39 @@ router } else { user = g_lib.getUserFromClientID(req.queryParams.client); } + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/access", + status: "Started", + description: "Getting User Access Token", + }); - if (!user.access) throw [g_lib.ERR_NOT_FOUND, "No access token found"]; + if (!user.access) throw [error.ERR_NOT_FOUND, "No access token found"]; res.send(user.access); + 
logger.logRequestSuccess({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/access", + status: "Success", + description: "Getting User Access Token", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/access", + status: "Failure", + description: "Getting User Access Token", + extra: "undefined", + error: e, + }); + g_lib.handleException(e, res); } }) @@ -776,17 +1147,45 @@ router router .get("/token/get/expiring", function (req, res) { + let user = null; + let result = null; try { - //console.log("exp:",(Date.now()/1000) + req.queryParams.expires_in); + logger.logRequestStarted({ + client: user?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/expiring", + status: "Started", + description: "Getting expiring user access token", + }); - var results = g_db._query( + results = g_db._query( "for i in u filter i.expiration != Null && i.expiration < @exp return {id:i._id,access:i.access,refresh:i.refresh,expiration:i.expiration}", { exp: Math.floor(Date.now() / 1000) + req.queryParams.expires_in, }, ); res.send(results); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/expiring", + status: "Success", + description: "Getting expiring user access token", + extra: results, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/get/expiring", + status: "Failure", + description: "Getting expiring user access token", + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -797,8 +1196,17 @@ router router .get("/view", function (req, res) { + let client = null; 
try { - var client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: "View User Information", + }); var user, det_ok = false; @@ -806,7 +1214,7 @@ router if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; user = g_db.u.document({ @@ -879,8 +1287,27 @@ router delete user.refresh; res.send([user]); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: "View User Information", + extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, + }); //req.queryParams.details ? 
} catch (e) { g_lib.handleException(e, res); + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: "View User Information", + extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, + error: e, + }); } }) .queryParam("client", joi.string().required(), "Client ID") @@ -891,8 +1318,17 @@ router router .get("/list/all", function (req, res) { + let client = null; var qry = "for i in u sort i.name_last, i.name_first"; var result; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/all", + status: "Started", + description: "List all users", + }); if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { qry += @@ -924,6 +1360,15 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/all", + status: "Success", + description: "List all users", + extra: result, + }); }) .queryParam("offset", joi.number().optional(), "Offset") .queryParam("count", joi.number().optional(), "Count") @@ -934,6 +1379,15 @@ router .get("/list/collab", function (req, res) { var result, client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/collab", + status: "Started", + description: "List collaborators of client", + }); + var qry = "for x in union_distinct((for v in 2..2 any @user owner, member, acl filter is_same_collection('u',v) return" + " distinct { uid: v._id, name_last: v.name_last, name_first: v.name_first }),(for v in 3..3 inbound @user member, outbound owner, outbound admin" + @@ -974,6 +1428,16 @@ router } res.send(result); + 
logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/collab", + status: "Success", + description: "List collaborators of client", + extra: result, + }); + //res.send( g_db._query( "for x in union_distinct((for v in 2..2 any @user owner, member, acl filter is_same_collection('u',v) return distinct { uid: v._id, name: v.name }),(for v in 3..3 inbound @user member, outbound owner, outbound admin filter is_same_collection('u',v) return distinct { uid: v._id, name: v.name }),(for v in 2..2 inbound @user owner, outbound admin filter is_same_collection('u',v) return distinct { uid: v._id, name: v.name })) return x", { user: client._id })); }) .queryParam("client", joi.string().required(), "Client ID") @@ -988,6 +1452,8 @@ Note: must delete ALL data records and projects owned by the user being deleted */ router .get("/delete", function (req, res) { + let client = null; + let user_id = null; try { g_db._executeTransaction({ collections: { @@ -1013,18 +1479,24 @@ router ], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); - var user_id; + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Started", + description: "Remove existing user entry", + }); if (req.queryParams.subject) { user_id = req.queryParams.subject; if (!g_db.u.exists(user_id)) - throw [g_lib.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; - g_lib.ensureAdminPermUser(client, user_id); + throw [error.ERR_INVALID_PARAM, "No such user '" + user_id + "'"]; + permissions.ensureAdminPermUser(client, user_id); } else { user_id = client._id; } - //console.log( "delete user", user_id ); var objects, subobjects, obj, subobj, i, j; @@ -1036,7 +1508,6 @@ router .toArray(); for (i in objects) { obj = 
objects[i]; - //console.log( "del ident", obj ); g_graph[obj.substr(0, obj.indexOf("/"))].remove(obj); } @@ -1051,7 +1522,6 @@ router .toArray(); for (i in objects) { obj = objects[i]; - //console.log( "del proj", obj ); subobjects = g_db ._query("for v in 1..1 inbound @proj owner return v._id", { proj: obj, @@ -1059,7 +1529,6 @@ router .toArray(); for (j in subobjects) { subobj = subobjects[j]; - //console.log("del subobj",subobj); g_graph[subobj.substr(0, subobj.indexOf("/"))].remove(subobj); } @@ -1074,14 +1543,32 @@ router .toArray(); for (i in objects) { obj = objects[i]; - //console.log( "del owned", obj ); g_graph[obj.substr(0, obj.indexOf("/"))].remove(obj); } g_graph.u.remove(user_id); }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Success", + description: "Remove existing user entry", + extra: user_id, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Failure", + description: "Remove existing user entry", + extra: user_id, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1092,30 +1579,64 @@ router router .get("/ident/list", function (req, res) { + let client = null; try { - var client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/list", + status: "Started", + description: "List user linked IDs", + }); if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; const subject = g_db.u.document(req.queryParams.subject); - 
g_lib.ensureAdminPermUser(client, subject._id); + permissions.ensureAdminPermUser(client, subject._id); res.send( g_db._query("for v in 1..1 outbound @client ident return v._key", { client: subject._id, }), ); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/list", + status: "Success", + description: "List user linked IDs", + }); } else { res.send( g_db._query("for v in 1..1 outbound @client ident return v._key", { client: client._id, }), ); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/list", + status: "Success", + description: "List user linked IDs", + }); } } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/list", + status: "Failure", + description: "List user linked IDs", + error: e, + }); g_lib.handleException(e, res); } }) @@ -1125,6 +1646,7 @@ router router .get("/ident/add", function (req, res) { + let client = null; try { g_db._executeTransaction({ collections: { @@ -1132,8 +1654,16 @@ router write: ["uuid", "accn", "ident"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var id; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Started", + description: "Add new linked identity", + }); if (g_lib.isUUID(req.queryParams.ident)) { if ( @@ -1141,7 +1671,17 @@ router _id: "uuid/" + req.queryParams.ident, }) ) - return; + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Success", + description: "Add 
new linked identity", + extra: req.queryParams.ident, + }); + + return; id = g_db.uuid.save( { _key: req.queryParams.ident, @@ -1168,6 +1708,15 @@ router }, ); } + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Success", + description: "Add new linked identity", + extra: req.queryParams.ident, + }); return; } else { var accn = { @@ -1183,19 +1732,19 @@ router } } else throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid identity value: " + req.queryParams.ident, ]; if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; const user = g_db.u.document(req.queryParams.subject); - g_lib.ensureAdminPermUser(client, user._id); + permissions.ensureAdminPermUser(client, user._id); g_db.ident.save({ _from: user._id, @@ -1209,7 +1758,26 @@ router } }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Success", + description: "Add new linked identity", + extra: req.queryParams.ident, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Failure", + description: "Add new linked identity", + extra: req.queryParams.ident, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1225,6 +1793,7 @@ router router .get("/ident/remove", function (req, res) { + let client = null; try { g_db._executeTransaction({ collections: { @@ -1232,10 +1801,17 @@ router write: ["uuid", "accn", "ident"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); const owner = 
g_lib.getUserFromClientID(req.queryParams.ident); - - g_lib.ensureAdminPermUser(client, owner._id); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/remove", + status: "Started", + description: "Remove linked identity from user account", + }); + permissions.ensureAdminPermUser(client, owner._id); if (g_lib.isUUID(req.queryParams.ident)) { g_graph.uuid.remove("uuid/" + req.queryParams.ident); @@ -1243,12 +1819,31 @@ router g_graph.accn.remove("accn/" + req.queryParams.ident); } else throw [ - g_lib.ERR_INVALID_PARAM, + error.ERR_INVALID_PARAM, "Invalid identity value: " + req.queryParams.ident, ]; }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/remove", + status: "Success", + description: "Remove linked identity from user account", + extra: req.queryParams.ident, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/remove", + status: "Failure", + description: "Remove linked identity from user account", + extra: req.queryParams.ident, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1259,10 +1854,39 @@ router router .get("/ep/get", function (req, res) { + let client = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/get", + status: "Started", + description: "Get recent end-points", + }); + res.send(client.eps ? 
client.eps : []); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/get", + status: "Success", + description: "Get recent end-points", + extra: client.eps, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/get", + status: "Failure", + description: "Get recent end-points", + extra: client.eps, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1272,8 +1896,17 @@ router router .get("/ep/set", function (req, res) { + let client = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/set", + status: "Started", + description: "Set recent end-points", + }); g_db._update( client._id, { @@ -1283,7 +1916,26 @@ router keepNull: false, }, ); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/set", + status: "Started", + description: "Set recent end-points", + extra: client.eps, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ep/set", + status: "Failure", + description: "Set recent end-points", + extra: client.eps, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/api/version_router.js.in b/core/database/foxx/api/version_router.js.in index cfc338ace..1808401d2 100644 --- a/core/database/foxx/api/version_router.js.in +++ b/core/database/foxx/api/version_router.js.in @@ -5,6 +5,8 @@ const router = createRouter(); const joi = require('joi'); const g_db = 
require('@arangodb').db; const g_lib = require('./support'); +const logger = require("./lib/logger"); +const basePath = ""; module.exports = router; @@ -14,6 +16,14 @@ module.exports = router; router.get('/version', function(req, res) { try { + logger.logRequestStarted({ + client: "anonymous", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/version", + status: "Started", + description: "Get version numbers", + }); res.send({ "release_year": @DATAFED_RELEASE_YEAR@, "release_month": @DATAFED_RELEASE_MONTH@, @@ -27,7 +37,26 @@ router.get('/version', function(req, res) { "component_minor": @DATAFED_FOXX_MINOR@, "component_patch": @DATAFED_FOXX_PATCH@ }); + logger.logRequestSuccess({ + client: "anonymous", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/version", + status: "Success", + description: "Get version numbers", + extra: null + }); } catch (e) { + logger.logRequestFailure({ + client: "anonymous", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/version", + status: "Failure", + description: "Get version numbers", + extra: null, + error: e + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/authz.test.js b/core/database/foxx/tests/authz.test.js index d9976a2c9..7b6ff062c 100644 --- a/core/database/foxx/tests/authz.test.js +++ b/core/database/foxx/tests/authz.test.js @@ -3,7 +3,7 @@ const chai = require("chai"); const expect = chai.expect; const authzModule = require("../api/authz"); const g_db = require("@arangodb").db; -const g_lib = require("../api/support"); +const permissions = require("../api/lib/permissions"); const arangodb = require("@arangodb"); describe("Authz functions", () => { @@ -49,7 +49,7 @@ describe("Authz functions", () => { _to: owner_id, }); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; expect(authzModule.isRecordActionAuthorized(client, data_key, 
req_perm)).to.be.true; }); @@ -74,7 +74,7 @@ describe("Authz functions", () => { g_db.u.save(client); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; expect(() => authzModule.isRecordActionAuthorized(client, data_key, req_perm), @@ -106,7 +106,7 @@ describe("Authz functions", () => { _to: "u/george", }); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; expect(authzModule.isRecordActionAuthorized(client, data_key, req_perm)).to.be.true; }); @@ -156,7 +156,7 @@ describe("Authz functions", () => { _from: project_id, _to: bob_id, }); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; expect(authzModule.isRecordActionAuthorized(bob, data_key, req_perm)).to.be.true; }); @@ -198,7 +198,7 @@ describe("Authz functions", () => { }, { waitForSync: true }, ); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; expect(authzModule.isRecordActionAuthorized(bob, data_key, req_perm)).to.be.false; }); @@ -287,7 +287,7 @@ describe("Authz functions", () => { }, { waitForSync: true }, ); - let req_perm = g_lib.PERM_CREATE; + let req_perm = permissions.PERM_CREATE; // Non-project admin should not have permission expect(authzModule.isRecordActionAuthorized(mandy, data_key, req_perm)).to.be.false; @@ -353,7 +353,7 @@ describe("Authz functions", () => { g_db.u.save(tim, { waitForSync: true }); - let req_perm = g_lib.PERM_READ; + let req_perm = permissions.PERM_READ; expect(authzModule.isRecordActionAuthorized(tim, data_key, req_perm)).to.be.false; }); diff --git a/core/database/foxx/tests/authz_router.test.js b/core/database/foxx/tests/authz_router.test.js index 53ec97d26..134abfaef 100644 --- a/core/database/foxx/tests/authz_router.test.js +++ b/core/database/foxx/tests/authz_router.test.js @@ -6,7 +6,7 @@ const request = require("@arangodb/request"); const { baseUrl } = module.context; const g_db = require("@arangodb").db; const g_lib = require("../api/support"); - +const 
permissions = require("../api/lib/permissions"); // Constants used throughout test file // The base URL for the authz foxx route const authz_base_url = `${baseUrl}/authz`; @@ -167,8 +167,8 @@ function defaultWorkingSetupProject() { g_db.acl.save({ _from: root._id, _to: mem_grp._id, - grant: g_lib.PERM_MEMBER, - inhgrant: g_lib.PERM_MEMBER, + grant: permissions.PERM_MEMBER, + inhgrant: permissions.PERM_MEMBER, }); g_db.loc.save({ diff --git a/core/database/foxx/tests/base_repository.test.js b/core/database/foxx/tests/base_repository.test.js new file mode 100644 index 000000000..64532889f --- /dev/null +++ b/core/database/foxx/tests/base_repository.test.js @@ -0,0 +1,101 @@ +"use strict"; + +const { expect } = require("chai"); +const { BaseRepository } = require("../api/models/repositories/base_repository"); +const g_db = require("@arangodb").db; +const error = require("../api/lib/error_codes"); +const permissions = require("../api/lib/permissions"); + +class TestRepo extends BaseRepository { + constructor(config) { + return super(config, {}); + } +} + +describe("unit_base_repository: Base Repository tests", function () { + beforeEach(() => { + const collections = ["repo", "admin", "u"]; + collections.forEach((name) => { + let col = g_db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + g_db._create(name); // create if it doesn’t exist + } + }); + }); + + // The pub key is a test key + function getValidRepoData() { + return { + id: "repo/123", + key: "123", + title: "THE Repository", + capacity: 0, + desc: "A large description about the repository.", + }; + } + function getValidRawRepoData() { + return { + _id: "repo/123", + _key: "123", + title: "THE Repository", + capacity: 0, + desc: "A large description about the repository.", + }; + } + function getValidRepoDataNoIdKey() { + return { + title: "THE Repository", + capacity: 0, + desc: "A large description about the repository.", + }; + } + + it("should save 
a repository successfully", function () { + const repoConfig = getValidRepoDataNoIdKey(); + const result = new TestRepo(repoConfig); + const repo = result.value; + const save_result = repo.save(); + + expect(save_result.ok).to.equal(true); + const savedRepo = save_result.value; + console.log("Saved repo"); + console.log(savedRepo); + expect(savedRepo).to.have.property("_key"); + + // verify it exists in DB + expect(savedRepo.title).to.equal("THE Repository"); + }); + + it("should update a repository successfully", function () { + const repoConfig = getValidRepoDataNoIdKey(); + const result = new TestRepo(repoConfig); + const repo = result.value; + repo.save(); + + const update_result = result.value.update({ title: "Updated Repo" }); + expect(update_result.ok).to.equal(true); + const updated = update_result.value; + + expect(updated.title).to.equal("Updated Repo"); + }); + + it("should return Result.err when updating non-existent repo", function () { + const result = new TestRepo(getValidRepoDataNoIdKey()); + + const update_result = result.value.update({ title: "Should Fail" }); + console.log("Update result is"); + console.log(update_result); + expect(update_result.ok).to.be.false; + expect(update_result.error.message).to.match( + /Failed to update repository, repository document was not found \(repo\/undefined\)/, + ); + }); + + it("should return error for unimplemented validate()", function () { + const validate_result = TestRepo.validate({}); + expect(validate_result.ok).to.be.false; + expect(validate_result.error.code).to.equal(error.ERR_INVALID_OPERATION); + }); +}); diff --git a/core/database/foxx/tests/query_router.test.js b/core/database/foxx/tests/query_router.test.js new file mode 100644 index 000000000..b075e6790 --- /dev/null +++ b/core/database/foxx/tests/query_router.test.js @@ -0,0 +1,166 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = 
require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const qry_base_url = `${baseUrl}/qry`; + +describe("unit_query_router: the Foxx microservice qry_router endpoints", () => { + after(function () { + const collections = ["u", "qry"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + const collections = ["u", "qry"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + db._create(name); // create if it doesn’t exist + } + }); + }); + + it("should successfully run the create route", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + + // Arrange + const request_string = `${qry_base_url}/create?client=u/fakeUser`; + + const body = { + title: "My Query", + qry_begin: "FOR i IN something", + qry_end: "RETURN i", + qry_filter: "", + params: {}, + limit: 10, + query: {}, // adjust if necessary + }; + + const response = request.post(request_string, { + json: true, + body: body, + headers: { + "x-correlation-id": "test-correlation-id", + }, + }); + + // Assert + expect(response.status).to.equal(200); + }); + + it("should fail running the create route", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + + // Arrange + const request_string = `${qry_base_url}/create?client=u/wellthiswasunexpected`; + + const body = { + title: "My Query", + qry_begin: "FOR i IN 
something", + qry_end: "RETURN i", + qry_filter: "", + params: {}, + limit: 10, + query: {}, // adjust if necessary + }; + + const response = request.post(request_string, { + json: true, + body: body, + headers: { + "x-correlation-id": "test-correlation-id", + }, + }); + + // Assert + expect(response.status).to.equal(400); + }); + + it("should return a list of saved queries for a valid user", () => { + // arrange + const fakeUser = { + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake User", + email: "fakeuser@datadev.org", + is_admin: false, + max_coll: 5, + max_proj: 5, + max_sav_qry: 10, + }; + + db.u.save(fakeUser); + + // Save the query and the edge between the query and the user + var request_string = `${qry_base_url}/create?client=u/fakeUser`; + + var body = { + title: "Test Query Title", + qry_begin: "FOR i IN something", + qry_end: "RETURN i", + qry_filter: "", + params: {}, + limit: 10, + query: {}, // adjust if necessary + }; + + var response = request.post(request_string, { + json: true, + body: body, + headers: { + "x-correlation-id": "test-correlation-id", + }, + }); + + request_string = `${qry_base_url}/list?client=u/fakeUser`; + + // act + response = request.get(request_string, { + headers: { + "x-correlation-id": "test-correlation-id", + }, + }); + + var parsed = JSON.parse(response.body); + console.log("Response body:", response.body); + // assert + expect(response.status).to.equal(200); + expect(parsed).to.be.an("array"); + expect(parsed.length).to.be.greaterThan(0); + }); +}); diff --git a/core/database/foxx/tests/record.test.js b/core/database/foxx/tests/record.test.js index ef88ee9c8..ed0c4fbcb 100644 --- a/core/database/foxx/tests/record.test.js +++ b/core/database/foxx/tests/record.test.js @@ -5,6 +5,7 @@ const expect = chai.expect; const Record = require("../api/record"); const g_db = require("@arangodb").db; const g_lib = require("../api/support"); +const error = require("../api/lib/error_codes"); const arangodb = require("@arangodb"); 
function recordRepoAndUserSetup(record_key, user_id, repo_data) { @@ -36,7 +37,7 @@ describe("Record Class", () => { const record = new Record("invalidKey"); expect(record.exists()).to.be.false; expect(record.key()).to.equal("invalidKey"); - expect(record.error()).to.equal(g_lib.ERR_NOT_FOUND); + expect(record.error()).to.equal(error.ERR_NOT_FOUND); expect(record.errorMessage()).to.equal("Invalid key: (invalidKey). No record found."); }); @@ -83,7 +84,7 @@ describe("Record Class", () => { expect(record.isManaged()).to.be.false; expect(record.exists()).to.be.true; expect(record.key()).to.equal(valid_key); - expect(record.error()).to.equal(g_lib.ERR_PERM_DENIED); + expect(record.error()).to.equal(error.ERR_PERM_DENIED); const pattern = /^Permission denied data is not managed by DataFed/; expect(record.errorMessage()).to.match(pattern); }); @@ -195,7 +196,7 @@ describe("Record Class", () => { const record = new Record(valid_key); expect(record.isPathConsistent("/incorrect/file/path/" + valid_key)).to.be.false; - expect(record.error()).to.equal(g_lib.ERR_PERM_DENIED); + expect(record.error()).to.equal(error.ERR_PERM_DENIED); const pattern = /^Record path is not consistent/; expect(record.errorMessage()).to.match(pattern); }); @@ -311,7 +312,7 @@ describe("Record Class", () => { const record = new Record(valid_key); expect(record.isPathConsistent("/incorrect/file/path/user/sherry/" + valid_key)).to.be .false; - expect(record.error()).to.equal(g_lib.ERR_PERM_DENIED); + expect(record.error()).to.equal(error.ERR_PERM_DENIED); const pattern = /^Record path is not consistent/; expect(record.errorMessage()).to.match(pattern); }); diff --git a/core/database/foxx/tests/repo.test.js b/core/database/foxx/tests/repo.test.js index 017f3aaa3..60057efa8 100644 --- a/core/database/foxx/tests/repo.test.js +++ b/core/database/foxx/tests/repo.test.js @@ -5,9 +5,10 @@ const { expect } = chai; const { Repo, PathType } = require("../api/repo"); const g_db = require("@arangodb").db; const 
g_lib = require("../api/support"); +const error = require("../api/lib/error_codes"); const arangodb = require("@arangodb"); -describe("Testing Repo class", () => { +describe("unit_repo: Testing Repo class", () => { beforeEach(() => { g_db.d.truncate(); g_db.alloc.truncate(); @@ -19,7 +20,7 @@ describe("Testing Repo class", () => { const repo = new Repo("invalidKey"); expect(repo.exists()).to.be.false; expect(repo.key()).to.equal("invalidKey"); - expect(repo.error()).to.equal(g_lib.ERR_NOT_FOUND); + expect(repo.error()).to.equal(error.ERR_NOT_FOUND); }); it("unit_repo: should return REPO_ROOT_PATH for exact match with repo root", () => { diff --git a/core/database/foxx/tests/repo_globus.test.js b/core/database/foxx/tests/repo_globus.test.js new file mode 100644 index 000000000..7487dcea2 --- /dev/null +++ b/core/database/foxx/tests/repo_globus.test.js @@ -0,0 +1,498 @@ +"use strict"; + +const { expect } = require("chai"); +const { Result } = require("../api/models/repositories/types"); +const { ExecutionMethod } = require("../api/lib/execution_types"); +const { GlobusRepo } = require("../api/models/repositories/repository/globus"); +const g_tasks = require("../api/tasks"); +const error = require("../api/lib/error_codes"); +const g_lib = require("../api/support"); +const g_db = require("@arangodb").db; + +describe("unit_repository_globus: Globus Repository Operations", function () { + beforeEach(function () { + const collections = [ + "d", + "block", + "alloc", + "loc", + "lock", + "repo", + "admin", + "task", + "g", + "p", + "u", + ]; + collections.forEach((name) => { + let col = g_db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + g_db._create(name); // create if it doesn’t exist + } + }); + }); + + // The pub key is a test key + function getValidRawRepoData() { + return { + _id: "repo/123", + _key: "123", + title: "Test Globus Repository", + capacity: 5000000000, + pub_key: 
"{Yys%Fr7VBct5AilOs$SnW%k$Qm[DBwvGeS0MQ46", + address: "burning-fast-repo.org", + endpoint: "8b7f1c4e-3d4a-4d6a-9a76-9e4b3e95b7b8", + domain: "fire", + path: "/one/repo/to/rule/them/all/123", + }; + } + + function getValidRepoData() { + return { + id: "repo/123", + key: "123", + title: "Test Globus Repository", + capacity: 5000000000, + pub_key: "{Yys%Fr7VBct5AilOs$SnW%k$Qm[DBwvGeS0MQ46", + address: "burning-fast-repo.org", + endpoint: "8b7f1c4e-3d4a-4d6a-9a76-9e4b3e95b7b8", + domain: "fire", + path: "/one/repo/to/rule/them/all/123", + }; + } + + function getRawAllocationCreateTask() { + return { + _id: "task/43", + _key: "43", + status: 0, + msg: "Running", + client: "u456", + type: g_lib.TT_ALLOC_CREATE, + }; + } + + function getRawDataPutTask() { + return { + _id: "task/43", + _key: "43", + status: 0, + msg: "Running", + client: "u456", + type: g_lib.TT_DATA_PUT, + }; + } + + function getRawBlockingAllocationLock() { + return { + _id: "lock/31", + _key: "31", + _from: "task/43", + _to: "repo/123", + level: 1, + context: "u/456", + }; + } + + function getValidRepoData() { + return { + id: "repo/123", + key: "123", + title: "Test Globus Repository", + capacity: 5000000000, + pub_key: "{Yys%Fr7VBct5AilOs$SnW%k$Qm[DBwvGeS0MQ46", + address: "burning-fast-repo.org", + endpoint: "8b7f1c4e-3d4a-4d6a-9a76-9e4b3e95b7b8", + domain: "fire", + path: "/one/repo/to/rule/them/all/123", + }; + } + + function getValidRawUserData() { + return { + _id: "u/456", + _key: "456", + name: "Bobby", + }; + } + + function getValidRawAllocationData() { + return { + _id: "alloc/989", + _key: "989", + _from: "u/456", + _to: "repo/123", + subject: "u/456", + data_limit: 1000000000, + rec_limit: 1000000, + }; + } + + function getValidAllocationParams() { + return { + client: { + _id: "u/456", + is_admin: false, + }, + subject: "u/456", + data_limit: 1000000000, + rec_limit: 1000000, + }; + } + + describe("unit_repository_globus: validate", function () { + it("unit_repository_globus: should 
always return ok for valid repository data", function () { + const result = GlobusRepo.validate(getValidRepoData()); + expect(result.ok).to.be.true; + expect(result.value).to.be.true; + }); + + it("unit_repository_globus: should return ok even for incomplete repository data", function () { + const repoData = { _id: "repo/123" }; + const result = GlobusRepo.validate(repoData); + expect(result.ok).to.be.true; + }); + + it("unit_repository_globus: should return ok for null repository data", function () { + const result = GlobusRepo.validate(null); + expect(result.ok).to.be.false; + }); + }); + + describe("unit_repository_globus: createAllocation", function () { + it("unit_repository_globus: should fail to create allocation without admin role", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Client, 'u/456', does not have administrative repository permissions on repo/123", + ); + }); + + it("unit_repository_globus: should fail to create allocation if a duplicate task is found", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + g_db.task.save(getRawAllocationCreateTask()); + g_db.lock.save(getRawBlockingAllocationLock()); + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "A duplicate allocation create task was found: task/43", + ); + }); + + 
it("unit_repository_globus: should be blocked by previous Data put task", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + g_db.task.save(getRawDataPutTask()); + g_db.lock.save(getRawBlockingAllocationLock()); + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + + expect(result.ok).to.be.true; + expect(result.value).to.have.property("id"); + expect(result.value).to.have.property("repo_id", repoData.id); + expect(result.value).to.have.property("subject", params.subject); + expect(result.value).to.have.property("task_id"); + expect(result.value).to.have.property("status", g_lib.TS_BLOCKED); + expect(result.value).to.have.property("state"); + expect(result.value).to.have.property("queue_time"); + }); + + it("unit_repository_globus: should create allocation with valid parameters", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + + expect(result.ok).to.be.true; + expect(result.value).to.have.property("id"); + expect(result.value).to.have.property("repo_id", repoData.id); + expect(result.value).to.have.property("subject", params.subject); + expect(result.value).to.have.property("task_id"); + expect(result.value).to.have.property("status", g_lib.TS_READY); + expect(result.value).to.have.property("state"); + expect(result.value).to.have.property("queue_time"); + }); + + it("unit_repository_globus: missing rec_limit should 
throw.", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + delete params.rec_limit; + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Allocation rec_limit must be a number"); + }); + + it("unit_repository_globus: should reject allocation with missing subject", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + delete params.subject; + const repoRawData = getValidRawRepoData(); + g_db.repo.save(repoRawData); + g_db.u.save(getValidRawUserData()); + + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Allocation subject is required"); + }); + }); + + describe("unit_repository_globus: deleteAllocation", function () { + it("unit_repository_globus: should delete allocation with valid subject ID", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.admin.save({ + _from: repoRawData._id, + _to: userRawData._id, + }); + g_db.alloc.save(getValidRawAllocationData()); + + const globus_repo = new GlobusRepo(repoData).value; + + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + userRawData._id, + ); + + expect(result.ok).to.be.true; + expect(result.value).to.have.property("execution_method", ExecutionMethod.DEFERRED); + 
expect(result.value.task).to.have.property("_id"); + expect(result.value.task).to.have.property("msg", "Pending"); + expect(result.value.task).to.have.property("type", g_lib.TT_ALLOC_DEL); + expect(result.value.task).to.have.property("ct"); + expect(result.value.task).to.have.property("ut"); + expect(result.value.task).to.have.property("status", g_lib.TS_READY); + expect(result.value.task).to.have.property("step", 0); + expect(result.value.task).to.have.property("steps", 2); + }); + + it("unit_repository_globus: should delete allocation with valid system level admin permissions.", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.alloc.save(getValidRawAllocationData()); + + const globus_repo = new GlobusRepo(repoData).value; + + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: true }, + userRawData._id, + ); + + expect(result.ok).to.be.true; + expect(result.value).to.have.property("execution_method", ExecutionMethod.DEFERRED); + expect(result.value.task).to.have.property("_id"); + expect(result.value.task).to.have.property("msg", "Pending"); + expect(result.value.task).to.have.property("type", g_lib.TT_ALLOC_DEL); + expect(result.value.task).to.have.property("ct"); + expect(result.value.task).to.have.property("ut"); + expect(result.value.task).to.have.property("status", g_lib.TS_READY); + expect(result.value.task).to.have.property("step", 0); + expect(result.value.task).to.have.property("steps", 2); + }); + + it("unit_repository_globus: should reject deletion with null subject ID", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.admin.save({ + _from: repoRawData._id, + _to: userRawData._id, + }); + 
g_db.alloc.save(getValidRawAllocationData()); + + const globus_repo = new GlobusRepo(repoData).value; + + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + null, + ); + + expect(result.ok).to.be.false; + + expect(result.error.message).to.include( + "Subject ID is required for allocation deletion", + ); + }); + + it("unit_repository_globus: should reject deletion if client does not have permissions", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.alloc.save(getValidRawAllocationData()); + + const globus_repo = new GlobusRepo(repoData).value; + + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + userRawData._id, + ); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Failed to create allocation task: Client, 'u/456', does not have administrative repository permissions on repo/123", + ); + }); + + it("unit_repository_globus: should reject allocation deletion when allocation does not exist", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.admin.save({ + _from: repoRawData._id, + _to: userRawData._id, + }); + + const globus_repo = new GlobusRepo(repoData).value; + + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + userRawData._id, + ); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Failed to create allocation task: Subject, \'u/456\', has no allocation on repo/123", + ); + }); + + it("unit_repository_globus: should reject duplicate allocation deletion.", function () { + const repoData = getValidRepoData(); + const repoRawData = getValidRawRepoData(); + const 
userRawData = getValidRawUserData(); + g_db.repo.save(repoRawData); + g_db.u.save(userRawData); + + g_db.alloc.save(getValidRawAllocationData()); + g_db.admin.save({ + _from: repoRawData._id, + _to: userRawData._id, + }); + + const globus_repo = new GlobusRepo(repoData).value; + + globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + userRawData._id, + ); + const result = globus_repo.deleteAllocation( + { _id: userRawData._id, is_admin: false }, + userRawData._id, + ); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Failed to create allocation task: A duplicate allocation delete task was found: task/", + ); + }); + }); + + describe("unit_repository_globus: supportsDataOperations", function () { + it("unit_repository_globus: should always return true for Globus repositories", function () { + const repoData = getValidRepoData(); + const { id, key, ...repo_without_key } = repoData; + const globus_repo_result = new GlobusRepo(repo_without_key); + expect(globus_repo_result.ok).to.be.true; + const globus_repo = globus_repo_result.value; + const result = globus_repo.supportsDataOperations(); + expect(result.ok).to.be.true; + expect(result.value).to.be.true; + }); + + it("unit_repository_globus: should return true even for incomplete repository data", function () { + const repoData = {}; + + const globus_repo = new GlobusRepo(repoData).value; + const result = globus_repo.supportsDataOperations(); + expect(result.ok).to.be.true; + expect(result.value).to.be.true; + }); + }); + + describe("unit_repository_globus: getCapacityInfo", function () { + it("unit_repository_globus: should return capacity information for repository", function () { + const repoData = getValidRepoData(); + const globus_repo = new GlobusRepo(repoData).value; + const capacity = globus_repo.capacity(); + expect(capacity).to.equal(5000000000); + }); + + it("unit_repository_globus: should handle repository without capacity field", function () { + const 
repoData = { + _id: "repo/123", + title: "Test Repository", + }; + const globus_repo = new GlobusRepo(repoData).value; + const capacity = globus_repo.capacity(); + expect(capacity).to.equal(undefined); + }); + }); +}); diff --git a/core/database/foxx/tests/repo_metadata.test.js b/core/database/foxx/tests/repo_metadata.test.js new file mode 100644 index 000000000..dc6393a55 --- /dev/null +++ b/core/database/foxx/tests/repo_metadata.test.js @@ -0,0 +1,651 @@ +"use strict"; + +const { expect } = require("chai"); +const { Result } = require("../api/models/repositories/types"); +const { ExecutionMethod } = require("../api/lib/execution_types"); +const { MetadataRepo } = require("../api/models/repositories/repository/metadata"); +const g_tasks = require("../api/tasks"); +const g_db = require("@arangodb").db; +const error = require("../api/lib/error_codes"); +const permissions = require("../api/lib/permissions"); + +describe("unit_repository_metadata: Metadata Only Repository Operations", function () { + beforeEach(() => { + const collections = ["repo", "d", "alloc", "loc", "repo", "admin", "g", "p", "u"]; + collections.forEach((name) => { + let col = g_db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + g_db._create(name); // create if it doesn’t exist + } + }); + }); + + // The pub key is a test key + function getValidRepoData() { + return { + id: "repo/123", + key: "123", + title: "Test Metadata Repository", + capacity: 0, + }; + } + + function getValidRepoDataNoIDKEY() { + return { + title: "Test Metadata Repository", + capacity: 0, + }; + } + + function getValidRawRepoData() { + return { + _id: "repo/123", + _key: "123", + title: "Test Metadata Repository", + capacity: 0, + }; + } + + function getValidUserData() { + return { + _id: "u/456", + _key: "456", + name: "Bobby", + }; + } + + function getValidGroupData() { + return { + _id: "g/789", + _key: "789", + name: "Biker Bandits", + }; + } + + function 
getValidProjectData() { + return { + _id: "p/999", + _key: "999", + title: "The Golden Peach", + }; + } + + function getValidAllocationParams() { + return { + client: { + _id: "u/456", + is_admin: false, + }, + subject: "u/456", + rec_limit: 10000, + data_limit: 0, + }; + } + + describe("constructor and validation", function () { + it("should create MetadataRepo successfully with valid config", function () { + g_db.u.save(getValidUserData()); + const repoRawConfig = getValidRawRepoData(); + g_db.repo.save(repoRawConfig); + + const repoConfig = getValidRepoData(); + const result = new MetadataRepo(repoConfig); + expect(result.ok).to.be.true; + expect(result.value.type()).to.equal("metadata"); + }); + + it("should fail if capacity is not 0", function () { + const config = { ...getValidRepoData(), capacity: 10 }; + const repo = new MetadataRepo(config); + expect(repo.ok).to.be.false; + expect(repo.error.code).to.equal(error.ERR_INVALID_PARAM); + }); + + it("should fail if invalid fields exist", function () { + const config = { ...getValidRepoData(), pub_key: "something" }; + const repo = new MetadataRepo(config); + expect(repo.ok).to.be.false; + expect(repo.error.message).to.include("Metadata-only repositories should not have"); + }); + }); + + describe("unit_repository_metadata: Validation failures", function () { + it("unit_repository_metadata: should fail when subject is missing", function () { + const params = getValidAllocationParams(); + delete params.subject; + + const rawRepoData = getValidRawRepoData(); + // Repo exists but subject doesn't + g_db.repo.save(rawRepoData); + + const repo = new MetadataRepo(getValidRepoData()).value; + const result = repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(result.error.message).to.include("Allocation subject"); + }); + + it("unit_repository_metadata: should fail when subject is empty string", function () { + const params = 
getValidAllocationParams(); + params.subject = ""; + + const rawRepoData = getValidRawRepoData(); + // Repo exists but subject doesn't + g_db.repo.save(rawRepoData); + const repo = new MetadataRepo(getValidRepoData()).value; + const result = repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(result.error.message).to.include("Allocation subject"); + }); + + it("unit_repository_metadata: should fail when data_limit is not a number", function () { + const params = getValidAllocationParams(); + params.data_limit = "not-a-number"; + + const rawRepoData = getValidRawRepoData(); + // Repo exists but subject doesn't + g_db.repo.save(rawRepoData); + const repo = new MetadataRepo(getValidRepoData()).value; + const result = repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(result.error.message).to.include("data_limit must be a number"); + }); + + it("unit_repository_metadata: should fail when rec_limit is not a number", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + params.rec_limit = "invalid"; + + const rawRepoData = getValidRawRepoData(); + // Repo exists but subject doesn't + g_db.repo.save(rawRepoData); + const repo = new MetadataRepo(getValidRepoData()).value; + const result = repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + }); + + it("unit_repository_metadata: should fail when path is provided but not a string", function () { + const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + params.path = 123; + + const rawRepoData = getValidRawRepoData(); + // Repo exists but subject doesn't + g_db.repo.save(rawRepoData); + const repo = new MetadataRepo(getValidRepoData()).value; + const result = repo.createAllocation(params); + + 
expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(result.error.message).to.include("path must be a string"); + }); + }); + + describe("unit_repository_metadata: validate", function () { + it("unit_repository_metadata: should always return ok for valid repository data", function () { + const repoData = getValidRepoData(); + + const result = MetadataRepo.validate(repoData); + expect(result.ok).to.be.true; + expect(result.value).to.be.true; + }); + + it("unit_repository_metadata: should return false because of all of the incomplete repository data.", function () { + const repoData = { id: "repo/123" }; + const result = MetadataRepo.validate(repoData); + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Metadata repository capacity must be 0: capacity=undefined", + ); + }); + + it("unit_repository_metadata: should return false for null repository data", function () { + const result = MetadataRepo.validate(null); + expect(result.ok).to.be.false; + }); + }); + + describe("unit_repository_metadata: createAllocation", function () { + it("unit_repository_metadata: should fail to create allocation with non existent repo.", function () { + const params = getValidAllocationParams(); + + const repo = new MetadataRepo(getValidRepoDataNoIDKEY()).value; + const result = repo.createAllocation(params); + + expect(result.ok).to.be.false; + expect(result.error.message).to.include( + "Failed to create metadata allocation: Repo, \'repo/undefined\', does not exist", + ); + }); + + it("unit_repository_metadata: should create allocation with valid parameters", function () { + const repoRawData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + g_db.repo.save(repoRawData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoRawData._id, + _to: params.client._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = 
repo.createAllocation(params); + + expect(rv.ok).to.be.true; + expect(rv.value.result).to.have.property("id"); + expect(rv.value.result).to.have.property("repo_id", repoRawData._id); + expect(rv.value.result).to.have.property("subject", params.subject); + expect(rv.value.result).to.have.property("rec_limit", params.rec_limit); + }); + }); + describe("unit_repository_metadata: Repository and subject existence checks", function () { + it("should fail when repository does not exist", function () { + //const repoData = getValidRepoData(); + const params = getValidAllocationParams(); + + // Subject exists but repo doesn't + g_db.u.save(getValidUserData()); + + const repo = new MetadataRepo(getValidRepoDataNoIDKEY()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_NOT_FOUND); + expect(rv.error.message).to.equal( + "Failed to create metadata allocation: Repo, \'repo/undefined\', does not exist.", + ); + }); + + it("should fail when subject does not exist", function () { + const repoData = getValidRawRepoData(); + const params = getValidAllocationParams(); + + // Repo exists but subject doesn't + g_db.repo.save(repoData); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_NOT_FOUND); + expect(rv.error.message).to.equal( + "Failed to create metadata allocation: Subject, 'u/456', does not exist.", + ); + }); + + it("should work with different subject types (user, group, project)", function () { + const repoData = getValidRawRepoData(); + g_db.repo.save(repoData); + + // Test with group subject + const groupData = getValidGroupData(); + g_db.g.save(groupData); + + const paramsWithGroup = { + client: { + _id: "u/456", + is_admin: true, + }, + subject: groupData._id, + rec_limit: 5000, + data_limit: 0, + }; + + const repo = new MetadataRepo(getValidRepoData()).value; + const 
rv = repo.createAllocation(paramsWithGroup); + + expect(rv.ok).to.be.true; + expect(rv.value.result.subject).to.equal(groupData._id); + }); + }); + + describe("unit_repository_metadata: Permission checks", function () { + it("should fail when client lacks admin permissions", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + g_db.repo.save(repoData); + g_db.u.save(userData); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + console.log("RV is"); + console.log(rv); + expect(rv.error.code).to.equal(error.ERR_PERM_DENIED); + expect(rv.error.message).to.include( + "Allocation creation failed - Client, \'u/456\', does not have administrative repository permissions on repo/123", + ); + }); + + it("should succeed when client has admin permissions", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + g_db.repo.save(repoData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoData._id, + _to: params.client._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.true; + }); + }); + + describe("unit_repository_metadata: Duplicate allocation checks", function () { + it("should fail when allocation already exists for subject-repo pair", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + g_db.repo.save(repoData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoData._id, + _to: params.client._id, + }); + + // Create existing allocation + g_db.alloc.save({ + _from: params.subject, + _to: repoData._id, + data_limit: 1000, + rec_limit: 500, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + 
expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.include("already has an allocation"); + }); + + it("should allow allocation for same subject on different repo", function () { + const repoData1 = getValidRawRepoData(); + const repoData2 = { + _id: "repo/999", + _key: "999", + title: "Another Repository", + capacity: 0, + }; + + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + // Setup both repos and user + g_db.repo.save(repoData1); + g_db.repo.save(repoData2); + g_db.u.save(userData); + + // Admin permissions for both repos + g_db.admin.save({ + _from: repoData1._id, + _to: params.client._id, + }); + g_db.admin.save({ + _from: repoData2._id, + _to: params.client._id, + }); + + // Create allocation on second repo + g_db.alloc.save({ + _from: params.subject, + _to: repoData2._id, + data_limit: 1000, + rec_limit: 500, + }); + + // Try to create allocation on first repo - should succeed + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.true; + expect(rv.value.result.repo_id).to.equal(repoData1._id); + }); + }); + + describe("unit_repository_metadata: Successful allocation creation", function () { + it("should create allocation with all required fields", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + + g_db.repo.save(repoData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoData._id, + _to: params.client._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.true; + expect(rv.value.execution_method).to.equal(ExecutionMethod.DIRECT); + expect(rv.value.result).to.have.all.keys(["id", "repo_id", "subject", "rec_limit"]); + expect(rv.value.result.repo_id).to.equal(repoData._id); + 
expect(rv.value.result.subject).to.equal(params.subject); + expect(rv.value.result.rec_limit).to.equal(params.rec_limit); + + // Verify allocation was saved in database + const savedAlloc = g_db.alloc.firstExample({ + _from: params.subject, + _to: repoData._id, + }); + + expect(savedAlloc).to.exist; + expect(savedAlloc.data_limit).to.equal(params.data_limit); + expect(savedAlloc.rec_limit).to.equal(params.rec_limit); + expect(savedAlloc.rec_count).to.equal(0); + expect(savedAlloc.data_size).to.equal(0); + expect(savedAlloc.path).to.equal("/"); + expect(savedAlloc.type).to.equal("metadata"); + }); + + it("should handle custom path parameter", function () { + // const repoData = getValidRepoData(); + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + params.path = "/custom/path"; + + g_db.repo.save(repoData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoData._id, + _to: params.client._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.true; + + // Note: The current implementation doesn't use the custom path, + // it always sets path to "/". This test documents current behavior. 
+ const savedAlloc = g_db.alloc.firstExample({ + _from: params.subject, + _to: repoData._id, + }); + expect(savedAlloc.path).to.equal("/"); + }); + + it("should handle different rec_limit values", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + const params = getValidAllocationParams(); + params.rec_limit = 99999; + + g_db.repo.save(repoData); + g_db.u.save(userData); + g_db.admin.save({ + _from: repoData._id, + _to: params.client._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.createAllocation(params); + + expect(rv.ok).to.be.true; + expect(rv.value.result.rec_limit).to.equal(99999); + }); + }); + + describe("unit_repository_metadata: Parameter validation", function () { + it("should reject null subject", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.repo.save(repoData); + g_db.u.save(userData); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, null); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.equal("Subject ID is required for allocation deletion"); + }); + + it("should reject undefined subject", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.repo.save(repoData); + g_db.u.save(userData); + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, undefined); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.equal("Subject ID is required for allocation deletion"); + }); + + it("should reject empty string subject", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.repo.save(repoData); + g_db.u.save(userData); + const repo = new MetadataRepo(getValidRepoData()).value; + const 
rv = repo.deleteAllocation(userData, ""); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.equal("Subject ID is required for allocation deletion"); + }); + + it("should reject non-string subject", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.repo.save(repoData); + g_db.u.save(userData); + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, 123); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.equal("Subject ID is required for allocation deletion"); + }); + + it("should reject object as subject", function () { + const rawRepoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.repo.save(rawRepoData); + g_db.u.save(userData); + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, { id: "u/user" }); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(rv.error.message).to.equal("Subject ID is required for allocation deletion"); + }); + }); + + describe("unit_repository_metadata: Repository existence checks", function () { + it("should fail when repository does not exist", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.u.save(userData); + + const repo = new MetadataRepo(getValidRepoDataNoIDKEY()).value; + const rv = repo.deleteAllocation(userData, userData._id); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_NOT_FOUND); + expect(rv.error.message).to.include("Failed to delete metadata allocation: Repo"); + expect(rv.error.message).to.include("does not exist"); + }); + + it("should fail when subject does not exist", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + 
g_db.u.save(userData); + g_db.repo.save(repoData); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, "u/ghost"); + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_NOT_FOUND); + expect(rv.error.message).to.include("Failed to delete metadata allocation: Subject"); + expect(rv.error.message).to.include("u/ghost"); + expect(rv.error.message).to.include("does not exist"); + }); + }); + + describe("unit_repository_metadata: Allocation existence checks", function () { + it("should fail when allocation does not exist", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.u.save(userData); + g_db.repo.save(repoData); + g_db.admin.save({ + _from: repoData._id, + _to: userData._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, userData._id); + + expect(rv.ok).to.be.false; + expect(rv.error.code).to.equal(error.ERR_NOT_FOUND); + expect(rv.error.message).to.include("Failed to delete metadata allocation: Subject"); + expect(rv.error.message).to.include(userData._id); + expect(rv.error.message).to.include("has no allocation on"); + expect(rv.error.message).to.include(repoData._id); + }); + + it("should proceed when allocation exists", function () { + const repoData = getValidRawRepoData(); + const userData = getValidUserData(); + g_db.u.save(userData); + g_db.repo.save(repoData); + const alloc = g_db.alloc.save({ + _from: userData._id, + _to: repoData._id, + }); + g_db.admin.save({ + _from: repoData._id, + _to: userData._id, + }); + + const repo = new MetadataRepo(getValidRepoData()).value; + const rv = repo.deleteAllocation(userData, userData._id); + expect(g_db._exists(alloc._id)).to.be.false; + }); + }); +}); diff --git a/core/database/foxx/tests/repo_router.test.js b/core/database/foxx/tests/repo_router.test.js new file mode 100644 index 000000000..72fecc3a0 --- /dev/null +++ 
b/core/database/foxx/tests/repo_router.test.js @@ -0,0 +1,256 @@ +"use strict"; + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const g_db = require("@arangodb").db; + +const repo_base_url = `${baseUrl}/repo`; + +// NOTE: describe block strings are compared against test specification during test call, not file name +describe("integration_repo_router: the Foxx microservice repo_router create endpoint", () => { + beforeEach(() => { + const collections = ["repo", "d", "alloc", "loc", "repo", "admin", "g", "p", "u"]; + collections.forEach((name) => { + let col = g_db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + g_db._create(name); // create if it doesn’t exist + } + }); + }); + + const user_params = { + id: "u/shredder", + key: "shredder", + is_admin: false, + }; + + const user_params_admin = { + id: "u/splinter", + key: "splinter", + is_admin: false, + }; + + const user_params_raw = { + _key: "shredder", + is_admin: false, + }; + + const user_params_raw_admin = { + _key: "splinter", + is_admin: true, + }; + + const minimal_repo = { + id: "heavymetal", + title: "Rock On!!!!", + capacity: 0, + admins: ["u/shredder"], + type: "metadata", + }; + + const minimal_repo_admin = { + id: "heavymetal", + title: "Rock On!!!!", + capacity: 0, + admins: ["u/splinter"], + type: "metadata", + }; + + // All keys are examples + const minimal_globus_repo_admin = { + id: "heavymetal", + title: "Rock On!!!!", + capacity: 10000000000, + admins: ["u/splinter"], + address: "tcp://music.com", + endpoint: "c9b1b56e-3bde-4f7d-a932-92f6c4f046b", + path: "/mnt/nfs/large/heavymetal", + pub_key: "Zm7W6W5vJjZZqFj7okjBOS8K9wVjHhYyLzX+zA8B", + }; + + it("should deny creating a metadata repo without admin perms", () => { + // arrange + const rv = g_db.u.save(user_params_raw, { returnNew: true }); + const client_id = 
encodeURIComponent(user_params.id); + const request_string = `${repo_base_url}/create?client=${client_id}`; + + // act + const response = request.post(request_string, { + body: JSON.stringify(minimal_repo), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + // assert + expect(response.status).to.equal(400); + const json = JSON.parse(response.body); + expect(json.errorMessage).to.include("Permission Denied"); + }); + + it("should create a metadata repo when user has admin perms", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + // act + const response = request.post(request_string, { + body: JSON.stringify(minimal_repo_admin), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + + // assert + expect(response.status).to.equal(200); + const json = JSON.parse(response.body); + + expect(json).to.be.an("array").with.lengthOf(1); + + // Object structure + expect(json[0]).to.have.all.keys("type", "title", "capacity", "id"); + + // Property values + expect(json[0]).to.have.property("type", "metadata"); + expect(json[0]).to.have.property("title", "Rock On!!!!"); + expect(json[0]).to.have.property("capacity", 0); + expect(json[0]).to.have.property("id", "repo/heavymetal"); + }); + + it("should fail when metadata repo is assigned a capacity greater than 0", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + let non_zero_capacity = JSON.parse(JSON.stringify(minimal_repo_admin)); + non_zero_capacity.capacity = 1; + // act + const response = request.post(request_string, { + body: JSON.stringify(non_zero_capacity), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + // assert + expect(response.status).to.equal(400); + const json 
= JSON.parse(response.body); + + expect(json.errorMessage).to.include("Metadata repository capacity must be 0: capacity=1"); + }); + + it("should fail to create a repo when id is missing", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + let missing_id = JSON.parse(JSON.stringify(minimal_repo_admin)); + delete missing_id.id; + // act + const response = request.post(request_string, { + body: JSON.stringify(missing_id), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + // assert + expect(response.status).to.equal(400); + const json = JSON.parse(response.body); + console.log(json); + expect(json.errorMessage).to.include('child "id" fails because ["id" is required]'); + }); + + it("should fail to create a repo when title is missing", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + let missing_title = JSON.parse(JSON.stringify(minimal_repo_admin)); + delete missing_title.title; + // act + const response = request.post(request_string, { + body: JSON.stringify(missing_title), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + + // assert + expect(response.status).to.equal(400); + + const json = JSON.parse(response.body); + expect(json.errorMessage).to.include('child "title" fails because ["title" is required]'); + }); + + it("should fail to create a repo when capacity is missing", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + console.log(rv); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + let missing_capacity = JSON.parse(JSON.stringify(minimal_repo_admin)); + delete missing_capacity.capacity; + // act + const response = request.post(request_string, { + body: 
JSON.stringify(missing_capacity), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + // assert + expect(response.status).to.equal(400); + + const json = JSON.parse(response.body); + console.log(json); + expect(json.errorMessage).to.include( + 'child "capacity" fails because ["capacity" is required]', + ); + }); + + it("should create a globus repo when user has admin perms", () => { + // arrange + const rv = g_db.u.save(user_params_raw_admin, { returnNew: true }); + console.log(rv); + const request_string = `${repo_base_url}/create?client=${user_params_admin.id}`; + + // act + const response = request.post(request_string, { + body: JSON.stringify(minimal_globus_repo_admin), + headers: { "Content-Type": "application/json" }, + }); + + console.log(response); + // assert + expect(response.status).to.equal(200); + const json = JSON.parse(response.body); + + console.log(json); + expect(json).to.be.an("array").with.lengthOf(1); + + // Object structure + expect(json[0]).to.have.all.keys( + "type", + "title", + "capacity", + "id", + "address", + "endpoint", + "path", + "pub_key", + ); + + // Property values + expect(json[0]).to.have.property("type", "globus"); + expect(json[0]).to.have.property("title", "Rock On!!!!"); + expect(json[0]).to.have.property("capacity", 10000000000); + expect(json[0]).to.have.property("id", "repo/heavymetal"); + expect(json[0]).to.have.property("address", "tcp://music.com"); + expect(json[0]).to.have.property("endpoint", "c9b1b56e-3bde-4f7d-a932-92f6c4f046b"); + + // path should end with '/' + expect(json[0]).to.have.property("path", "/mnt/nfs/large/heavymetal/"); + expect(json[0]).to.have.property("pub_key", "Zm7W6W5vJjZZqFj7okjBOS8K9wVjHhYyLzX+zA8B"); + }); +}); diff --git a/core/database/foxx/tests/repositories.test.js b/core/database/foxx/tests/repositories.test.js new file mode 100644 index 000000000..d89040344 --- /dev/null +++ b/core/database/foxx/tests/repositories.test.js @@ -0,0 +1,114 @@ +"use 
strict"; + +const { expect } = require("chai"); +const { Repositories } = require("../api/models/repositories/repositories"); // adjust path if needed +const { RepositoryType, Result } = require("../api/models/repositories/types"); +const error = require("../api/lib/error_codes"); +const g_db = require("@arangodb").db; + +// Mock imports if needed (adjust to your actual repo) +const { createRepository } = require("../api/models/repositories/types"); +const { GlobusRepo } = require("../api/models/repositories/repository/globus"); +const { MetadataRepo } = require("../api/models/repositories/repository/metadata"); + +describe("integration_repositories: Repository Factory and Operations", function () { + const repositories = new Repositories(); + + beforeEach(() => { + const collections = ["repo", "d", "alloc", "loc", "admin", "g", "p", "u"]; + collections.forEach((name) => { + let col = g_db._collection(name); + if (col) { + col.truncate(); + } else { + g_db._create(name); + } + }); + }); + + function getValidGlobusConfig() { + return { + id: "repo/123", + type: RepositoryType.GLOBUS, + title: "Globus Repo", + desc: "Valid globus repo", + capacity: 1000, + endpoint: "XXXXYYYY-XXXX-YYYY-XXXX-YYYYXXXXYYYY", + path: "/data/123", + pub_key: "ABC123", + address: "tcp://localhost:5555", + exp_path: "/export", + }; + } + + function getValidGlobusConfigNoIdKey() { + return { + type: RepositoryType.GLOBUS, + title: "Globus Repo", + desc: "Valid globus repo", + capacity: 1000, + endpoint: "XXXXYYYY-XXXX-YYYY-XXXX-YYYYXXXXYYYY", + path: "/data/123", + pub_key: "ABC123", + address: "tcp://localhost:5555", + exp_path: "/export", + }; + } + + function getValidMetadataConfig() { + return { + id: "repo/456", + type: RepositoryType.METADATA, + title: "Metadata Repo", + desc: "Valid metadata repo", + capacity: 0, + }; + } + + function getValidMetadataConfigNoIdKey() { + return { + type: RepositoryType.METADATA, + title: "Metadata Repo", + desc: "Valid metadata repo", + capacity: 0, + 
}; + } + + describe("integration_repositories: createRepositoryByType()", function () { + it("should create a valid GLOBUS repository", function () { + const result = Repositories.createRepositoryByType(getValidGlobusConfigNoIdKey()); + console.log("result is"); + console.log(result); + expect(result.ok).to.be.true; + expect(result.value).to.exist; + expect(result.value.type()).to.equal("globus"); + }); + + it("should create a valid METADATA repository", function () { + const result = Repositories.createRepositoryByType(getValidMetadataConfigNoIdKey()); + expect(result.ok).to.be.true; + expect(result.value).to.exist; + expect(result.value.type()).to.equal("metadata"); + }); + + it("should fail if required fields are missing", function () { + const invalid = { type: RepositoryType.GLOBUS }; + const result = Repositories.createRepositoryByType(invalid); + expect(result.ok).to.be.false; + expect(result.error.code).to.equal(error.ERR_INVALID_PARAM); + expect(result.error.message).to.include("Missing required repository fields"); + }); + + it("should fail for unknown repository type", function () { + const invalid = { + id: "repo/999", + type: "UNKNOWN_TYPE", + title: "Invalid Repo", + capacity: 1000, + }; + const result = Repositories.createRepositoryByType(invalid); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Unknown repository type"); + }); + }); +}); diff --git a/core/database/foxx/tests/tag_router.test.js b/core/database/foxx/tests/tag_router.test.js new file mode 100644 index 000000000..3c7cdba73 --- /dev/null +++ b/core/database/foxx/tests/tag_router.test.js @@ -0,0 +1,63 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); 
+ +const tag_base_url = `${baseUrl}/tag`; + +describe("unit_tag_router: the Foxx microservice topic_router /search endpoint", () => { + after(function () { + const collections = ["tag"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + const collections = ["tag"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + db._create(name); // create if it doesn’t exist + } + }); + }); + + it("should successfully run the search route", () => { + db.tag.save({ + name: "testName", + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${tag_base_url}/search?name=testName`; + // act + const response = request.post(request_string); + // assert + expect(response.status).to.equal(200); + }); + + it("should successfully run the list by count route", () => { + db.tag.save({ + name: "testName", + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${tag_base_url}/list/by_count`; + // act + const response = request.post(request_string); + // assert + expect(response.status).to.equal(200); + }); +}); diff --git a/core/database/foxx/tests/task_router.test.js b/core/database/foxx/tests/task_router.test.js new file mode 100644 index 000000000..8815e09a0 --- /dev/null +++ b/core/database/foxx/tests/task_router.test.js @@ -0,0 +1,154 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const task_base_url = `${baseUrl}/task`; + 
+after(function () { + const collections = ["u", "task"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); +}); + +describe("unit_task_router: the Foxx microservice task_router list/ endpoint", () => { + beforeEach(() => { + const collections = ["u", "task"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + db._create(name); // create if it doesn’t exist + } + }); + }); + + it("should successfully run the list route", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + db.task.save({ + _key: "1", + _id: "task/1", + client: "u/fakeUser", // Add this so the query doesn't fail on client match + ut: Date.now() / 1000, + status: 1, + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/list?client=u/fakeUser&task_id=task/1`; + // act + const response = request.get(request_string); + // assert + expect(response.status).to.equal(200); + }); + + it("should raise an exception with invalid client", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/list?client=u/BOOMPOWSHOUT`; + // act + const response = request.get(request_string); + // assert + expect(response.status).to.equal(400); + }); +}); + +// NOTE: describe block strings are compared against test specification during test call, not file name +describe("unit_task_router: the Foxx microservice task_router view/ endpoint", () => { + 
it("should succeed running the view route", () => { + db.task.save({ + _key: "2", + _id: "task/2", + client: "u/fakeUser", // Add this so the query doesn't fail on client match + ut: Date.now() / 1000, + status: 1, + }); + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/view?client=u/fakeUser&task_id=task/2`; + + // act + const response = request.get(request_string); + // assert + expect(response.status).to.equal(200); + }); + + it("should raise an exception with invalid task", () => { + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/view?client=u/fakeUser&task_id=task/thisaintitchief`; + + // act + const response = request.get(request_string); + // assert + expect(response.status).to.equal(400); + }); +}); + +describe("unit_task_router: the Foxx microservice task_router run/ endpoint", () => { + it("should succeed the run route", () => { + db.task.save({ + _key: "3", + _id: "task/3", + client: "u/fakeUser", // Add this so the query doesn't fail on client match + ut: Date.now() / 1000, + status: 1, + type: g_lib.TT_DATA_GET, + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/run?client=u/fakeUser&task_id=task/3`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + }); + it("should raise an exception with invalid task", () => { + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${task_base_url}/run?client=u/fakeUser&task_id=task/wow...Icantbelieveitdidntwork`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(400); + }); +}); diff --git a/core/database/foxx/tests/validation.test.js b/core/database/foxx/tests/validation.test.js new file mode 100644 index 000000000..b3d487d40 --- /dev/null +++ 
b/core/database/foxx/tests/validation.test.js @@ -0,0 +1,111 @@ +"use strict"; + +const { expect } = require("chai"); +const { Result } = require("../api/models/repositories/types"); +const { + validateNonEmptyString, + validateCommonFields, + validatePOSIXPath, + validateRepositoryPath, +} = require("../api/models/repositories/validation"); + +describe("unit_validation_repository: Repository Validation Tests", function () { + describe("unit_validation_repository: validateNonEmptyString", function () { + it("should accept valid non-empty strings", function () { + const result = validateNonEmptyString("valid string", "Test field"); + expect(result.ok).to.be.true; + }); + + it("should reject null values", function () { + const result = validateNonEmptyString(null, "Test field"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Test field is required"); + }); + + it("should reject empty strings", function () { + const result = validateNonEmptyString("", "Test field"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Test field is required"); + }); + + it("should reject whitespace-only strings", function () { + const result = validateNonEmptyString(" ", "Test field"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Test field is required"); + }); + + it("should reject non-string values", function () { + const result = validateNonEmptyString(123, "Test field"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("Test field is required"); + }); + }); + + describe("unit_validation_repository: validateCommonFields", function () { + it("should accept valid common fields", function () { + const config = { + id: "test-repo", + title: "Test Repository", + capacity: 1000000, + admins: ["user1", "user2"], + }; + const result = validateCommonFields(config); + expect(result.ok).to.be.true; + }); + }); + + describe("unit_validation_repository: validatePOSIXPath", function () { 
+ it("should accept valid absolute paths", function () { + const result = validatePOSIXPath("/valid/path", "Test path"); + expect(result.ok).to.be.true; + }); + + it("should reject relative paths", function () { + const result = validatePOSIXPath("relative/path", "Test path"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("must be an absolute path"); + }); + + it("should reject paths with ..", function () { + const result = validatePOSIXPath("/path/../other", "Test path"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("contains invalid path sequences"); + }); + + it("should reject paths with //", function () { + const result = validatePOSIXPath("/path//other", "Test path"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("contains invalid path sequences"); + }); + + it("should reject empty paths", function () { + const result = validatePOSIXPath("", "Test path"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("must be a non-empty string"); + }); + }); + + describe("unit_validation_repository: validateRepositoryPath", function () { + it("should accept path ending with repo ID", function () { + const result = validateRepositoryPath("/data/repos/test-repo", "test-repo"); + expect(result.ok).to.be.true; + }); + + it("should accept path ending with repo ID and trailing slash", function () { + const result = validateRepositoryPath("/data/repos/test-repo/", "test-repo"); + expect(result.ok).to.be.true; + }); + + it("should reject path not ending with repo ID", function () { + const result = validateRepositoryPath("/data/repos/other-name", "test-repo"); + expect(result.ok).to.be.false; + expect(result.error.message).to.include("must end with repository ID (test-repo)"); + }); + + it("should inherit POSIX path validation", function () { + const result = validateRepositoryPath("relative/path/test-repo", "test-repo"); + expect(result.ok).to.be.false; + 
expect(result.error.message).to.include("must be an absolute path"); + }); + }); +}); diff --git a/core/database/foxx/tests/version_router.test.js b/core/database/foxx/tests/version_router.test.js new file mode 100644 index 000000000..9286ddf56 --- /dev/null +++ b/core/database/foxx/tests/version_router.test.js @@ -0,0 +1,53 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +// (replace "myrouter" with the actual base path) +const base_url = `${baseUrl}/`; + +after(function () { + // cleanup collections if needed + const collections = [ + /* list collections to truncate */ + ]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); +}); + +describe("unit_version_router: the Foxx microservice version/ endpoint", () => { + beforeEach(() => { + // ensure collections exist & trimmed/initialized + const collections = [ + /* list collections */ + ]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); + } else { + db._create(name); + } + }); + }); + + it("should succeed when valid parameters given", () => { + // arrange: setup fixture data + // e.g., db.u.save({...}); db.mycoll.save({...}); + // TODO: insert required documents + + // arrange: build request string + const request_string = `${base_url}/version`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + // additional assertions on response body (if needed) + }); +}); diff --git a/core/database/tests/test_fixture_setup.sh b/core/database/tests/test_fixture_setup.sh index b88b2c521..1f54992f2 100755 --- a/core/database/tests/test_fixture_setup.sh +++ b/core/database/tests/test_fixture_setup.sh @@ -4,10 +4,10 @@ set -uef -o pipefail SCRIPT=$(realpath "$BASH_SOURCE[0]") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath 
"${SOURCE}/../../../") -source "${PROJECT_ROOT}/config/datafed.sh" -source "${PROJECT_ROOT}/scripts/dependency_versions.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" +DATAFED_PROJECT_ROOT=$(realpath "${SOURCE}/../../../") +source "${DATAFED_PROJECT_ROOT}/config/datafed.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_install_functions.sh" Help() { echo "$(basename $0) Will initialize fixtures for Foxx tests" @@ -42,7 +42,7 @@ else fi if [ -z "${FOXX_MAJOR_API_VERSION:-}" ]; then - local_FOXX_MAJOR_API_VERSION=$(cat ${PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) + local_FOXX_MAJOR_API_VERSION=$(cat ${DATAFED_PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) else local_FOXX_MAJOR_API_VERSION=$(printenv FOXX_MAJOR_API_VERSION) fi diff --git a/core/database/tests/test_foxx.sh b/core/database/tests/test_foxx.sh index 5265d6100..9d91b4318 100755 --- a/core/database/tests/test_foxx.sh +++ b/core/database/tests/test_foxx.sh @@ -12,10 +12,10 @@ set -euf -o pipefail SCRIPT=$(realpath "$BASH_SOURCE[0]") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/../../../) -source ${PROJECT_ROOT}/config/datafed.sh -source "${PROJECT_ROOT}/scripts/dependency_versions.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" +DATAFED_PROJECT_ROOT=$(realpath ${SOURCE}/../../../) +source "${DATAFED_PROJECT_ROOT}/config/datafed.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_install_functions.sh" Help() { echo "$(basename $0) Will run a Foxx unit test" @@ -46,7 +46,7 @@ else fi if [ -z "${FOXX_MAJOR_API_VERSION:-}" ]; then - local_FOXX_MAJOR_API_VERSION=$(cat ${PROJECT_ROOT}/cmake/Version.cmake | grep -o -P 
"(?<=FOXX_API_MAJOR).*(?=\))" | xargs) + local_FOXX_MAJOR_API_VERSION=$(cat ${DATAFED_PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) else local_FOXX_MAJOR_API_VERSION=$(printenv FOXX_MAJOR_API_VERSION) fi diff --git a/core/database/tests/test_setup.sh b/core/database/tests/test_setup.sh index 33b542bf6..93195eb04 100755 --- a/core/database/tests/test_setup.sh +++ b/core/database/tests/test_setup.sh @@ -13,10 +13,10 @@ set -uef -o pipefail SCRIPT=$(realpath "$BASH_SOURCE[0]") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/../../../") -source "${PROJECT_ROOT}/config/datafed.sh" -source "${PROJECT_ROOT}/scripts/dependency_versions.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" +DATAFED_PROJECT_ROOT=$(realpath "${SOURCE}/../../../") +source "${DATAFED_PROJECT_ROOT}/config/datafed.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_install_functions.sh" Help() { echo "$(basename $0) Will set up a configuration file for the core server" @@ -52,7 +52,7 @@ else fi if [ -z "${FOXX_MAJOR_API_VERSION:-}" ]; then - local_FOXX_MAJOR_API_VERSION=$(cat ${PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) + local_FOXX_MAJOR_API_VERSION=$(cat ${DATAFED_PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) else local_FOXX_MAJOR_API_VERSION=$(printenv FOXX_MAJOR_API_VERSION) fi @@ -115,7 +115,7 @@ if [[ "$output" =~ .*"sdms".* ]]; then echo "SDMS already exists do nothing" else echo "Creating SDMS" - arangosh --server.endpoint "tcp://${local_DATAFED_DATABASE_HOST}:8529" --server.password "${local_DATAFED_DATABASE_PASSWORD}" --server.username "${local_DATABASE_USER}" --javascript.execute "${PROJECT_ROOT}/core/database/foxx/db_create.js" + arangosh --server.endpoint "tcp://${local_DATAFED_DATABASE_HOST}:8529" 
--server.password "${local_DATAFED_DATABASE_PASSWORD}" --server.username "${local_DATABASE_USER}" --javascript.execute "${DATAFED_PROJECT_ROOT}/core/database/foxx/db_create.js" # Give time for the database to be created sleep 2 arangosh --server.endpoint "tcp://${local_DATAFED_DATABASE_HOST}:8529" --server.password "${local_DATAFED_DATABASE_PASSWORD}" --server.username "${local_DATABASE_USER}" --javascript.execute-string 'db._useDatabase("sdms"); db.config.insert({"_key": "msg_daily", "msg" : "DataFed servers will be off-line for regular maintenance every Sunday night from 11:45 pm until 12:15 am EST Monday morning."}, {overwrite: true});' diff --git a/core/database/tests/test_teardown.sh b/core/database/tests/test_teardown.sh index b7a1652a6..9ff83e0cc 100755 --- a/core/database/tests/test_teardown.sh +++ b/core/database/tests/test_teardown.sh @@ -12,8 +12,8 @@ set -euf -o pipefail SCRIPT=$(realpath "$0") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/../../../") -source "${PROJECT_ROOT}/config/datafed.sh" +DATAFED_PROJECT_ROOT=$(realpath "${SOURCE}/../../../") +source "${DATAFED_PROJECT_ROOT}/config/datafed.sh" PATH_TO_PASSWD_FILE="${SOURCE}/database_temp.password" rm "${PATH_TO_PASSWD_FILE}" diff --git a/core/docker/Dockerfile b/core/docker/Dockerfile index 2b8a12e21..b1981e9ba 100644 --- a/core/docker/Dockerfile +++ b/core/docker/Dockerfile @@ -2,15 +2,16 @@ # cd ${PROJECT_ROOT} or cd DataFed # docker build -f core/docker/Dockerfile . 
-ARG BUILD_BASE="debian:bookworm-slim" -ARG DEPENDENCIES="dependencies" -ARG RUNTIME="runtime" -ARG DATAFED_DIR="/datafed" -ARG DATAFED_INSTALL_PATH="/opt/datafed" +ARG BUILD_BASE="debian:bookworm-slim" +ARG DEPENDENCIES="dependencies" +ARG RUNTIME="runtime" +ARG DATAFED_DIR="/datafed" +ARG DATAFED_INSTALL_PATH="/opt/datafed" ARG DATAFED_DEPENDENCIES_INSTALL_PATH="/opt/datafed/dependencies" -ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" -ARG BUILD_DIR="$DATAFED_DIR/source" -ARG LIB_DIR="/usr/local/lib" +ARG BUILD_DIR="$DATAFED_DIR/source" +ARG DATAFED_DEPENDENCIES_ROOT="$BUILD_DIR/external/DataFedDependencies" +ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" +ARG LIB_DIR="/usr/local/lib" FROM ${DEPENDENCIES} AS core-build @@ -20,6 +21,7 @@ ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ENV DATAFED_INSTALL_PATH="${DATAFED_INSTALL_PATH}" # For communicating with repo server @@ -27,9 +29,13 @@ EXPOSE 7512 # For listening to web server EXPOSE 7513 +RUN mkdir -p ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/dependency_versions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/generate_dependencies_config.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ + +COPY ./common ${BUILD_DIR}/common COPY ./core/CMakeLists.txt ${BUILD_DIR}/core/CMakeLists.txt COPY ./CMakeLists.txt ${BUILD_DIR} -COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_core_config.sh ${BUILD_DIR}/scripts/ COPY ./scripts/install_core.sh ${BUILD_DIR}/scripts/ @@ -37,15 +43,17 @@ COPY ./cmake ${BUILD_DIR}/cmake COPY ./core/docker/entrypoint.sh ${BUILD_DIR}/core/docker/ COPY ./core/server ${BUILD_DIR}/core/server -RUN ${BUILD_DIR}/scripts/generate_datafed.sh && \ - ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. 
-B build \ - -DBUILD_REPO_SERVER=False \ - -DBUILD_AUTHZ=False \ - -DBUILD_CORE_SERVER=True \ - -DBUILD_WEB_SERVER=False \ - -DBUILD_DOCS=False \ - -DBUILD_PYTHON_CLIENT=False \ - -DBUILD_FOXX=False +RUN ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh && \ + ${BUILD_DIR}/scripts/generate_datafed.sh && \ + ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. -B build \ + -DBUILD_REPO_SERVER=False \ + -DBUILD_AUTHZ=False \ + -DBUILD_CORE_SERVER=True \ + -DBUILD_WEB_SERVER=False \ + -DBUILD_DOCS=False \ + -DBUILD_PYTHON_CLIENT=False \ + -DBUILD_FOXX=False \ + -DENABLE_INTEGRATION_TESTS=False RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build -j 8 RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build --target install @@ -55,6 +63,7 @@ SHELL ["/bin/bash", "-c"] ARG DATAFED_DIR ARG DATAFED_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ARG DATAFED_DEPENDENCIES_INSTALL_PATH ARG BUILD_DIR ARG LIB_DIR @@ -62,6 +71,7 @@ ARG LIB_DIR # The above should also be available at runtime ENV DATAFED_INSTALL_PATH="$DATAFED_INSTALL_PATH" ENV DATAFED_DEPENDENCIES_INSTALL_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}" +ENV DATAFED_DEPENDENCIES_ROOT="${DATAFED_DEPENDENCIES_ROOT}" ENV DATAFED_DIR="$DATAFED_DIR" ENV BUILD_DIR="$BUILD_DIR" ENV LIB_DIR="$LIB_DIR" @@ -76,18 +86,21 @@ COPY --chown=datafed:root ./scripts/generate_core_config.sh ${BUILD_DIR}/scripts COPY --chown=datafed:root ./scripts/install_core.sh ${BUILD_DIR}/scripts/install_core.sh COPY --chown=datafed:root ./cmake/Version.cmake ${BUILD_DIR}/cmake/Version.cmake COPY --from=core-build --chown=datafed:root ${BUILD_DIR}/core/docker/entrypoint.sh ${BUILD_DIR}/core/docker/entrypoint.sh +COPY --from=core-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts ${DATAFED_DEPENDENCIES_ROOT}/scripts COPY --from=core-build --chown=datafed:root ${DATAFED_INSTALL_PATH}/core/datafed-core ${DATAFED_INSTALL_PATH}/core/datafed-core USER root -RUN chown -R datafed:root ${DATAFED_DIR} /opt 
/home/datafed && \ +RUN chown -R datafed:root ${DATAFED_DIR} /opt /home/datafed \ + ${DATAFED_DEPENDENCIES_ROOT} && \ chmod 774 ${DATAFED_DIR} ${BUILD_DIR} ${BUILD_DIR}/core/docker/entrypoint.sh \ ${DATAFED_INSTALL_PATH}/core/datafed-core \ ${BUILD_DIR}/scripts/generate_datafed.sh \ ${BUILD_DIR}/scripts/generate_core_config.sh \ ${BUILD_DIR}/scripts/install_core.sh \ ${DATAFED_INSTALL_PATH}/core && \ + find ${DATAFED_DEPENDENCIES_ROOT} -type d -exec chmod 0774 {} + && \ chmod 664 ${BUILD_DIR}/cmake/Version.cmake && \ chmod +t ${DATAFED_DIR} ${DATAFED_INSTALL_PATH} diff --git a/core/server/DatabaseAPI.cpp b/core/server/DatabaseAPI.cpp index 924e6c652..aa0d35905 100644 --- a/core/server/DatabaseAPI.cpp +++ b/core/server/DatabaseAPI.cpp @@ -126,12 +126,20 @@ long DatabaseAPI::dbGet(const char *a_url_path, curl_easy_setopt(m_curl, CURLOPT_WRITEDATA, &res_json); curl_easy_setopt(m_curl, CURLOPT_ERRORBUFFER, error); curl_easy_setopt(m_curl, CURLOPT_HTTPGET, 1); + struct curl_slist* headers = nullptr; + // safe: curl_slist_append copies the string internally + std::string header = "x-correlation-id: " + log_context.correlation_id; + headers = curl_slist_append(headers, header.c_str()); + + // attach headers to the CURL handle + curl_easy_setopt(m_curl, CURLOPT_HTTPHEADER, headers); CURLcode res = curl_easy_perform(m_curl); long http_code = 0; curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &http_code); - + curl_slist_free_all(headers); + if (res == CURLE_OK) { if (res_json.size()) { try { @@ -168,13 +176,14 @@ bool DatabaseAPI::dbGetRaw(const std::string url, string &a_result) { a_result.clear(); error[0] = 0; + curl_easy_setopt(m_curl, CURLOPT_HTTPHEADER, nullptr); // Clear any previous headers + // attach headers to the CURL handle curl_easy_setopt(m_curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(m_curl, CURLOPT_WRITEDATA, &a_result); curl_easy_setopt(m_curl, CURLOPT_ERRORBUFFER, error); curl_easy_setopt(m_curl, CURLOPT_HTTPGET, 1); CURLcode res = 
curl_easy_perform(m_curl); - long http_code = 0; curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &http_code); if (res == CURLE_OK && (http_code >= 200 && http_code < 300)) @@ -198,7 +207,14 @@ long DatabaseAPI::dbPost(const char *a_url_path, // TODO: construct URL outside of function const string url = buildSearchParamURL(a_url_path, a_params); + struct curl_slist* headers = nullptr; + // safe: curl_slist_append copies the string internally + std::string header = "x-correlation-id: " + log_context.correlation_id; + headers = curl_slist_append(headers, header.c_str()); + + // attach headers to the CURL handle + curl_easy_setopt(m_curl, CURLOPT_HTTPHEADER, headers); curl_easy_setopt(m_curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(m_curl, CURLOPT_WRITEDATA, &res_json); curl_easy_setopt(m_curl, CURLOPT_ERRORBUFFER, error); @@ -210,6 +226,7 @@ long DatabaseAPI::dbPost(const char *a_url_path, a_body ? a_body->c_str() : empty_body); CURLcode res = curl_easy_perform(m_curl); + curl_slist_free_all(headers); long http_code = 0; curl_easy_getinfo(m_curl, CURLINFO_RESPONSE_CODE, &http_code); @@ -2158,23 +2175,28 @@ void DatabaseAPI::repoCreate(const Auth::RepoCreateRequest &a_request, Value result; nlohmann::json payload; + + // Required attributes payload["id"] = a_request.id(); payload["title"] = a_request.title(); - payload["path"] = a_request.path(); - payload["pub_key"] = a_request.pub_key(); - payload["address"] = a_request.address(); - payload["endpoint"] = a_request.endpoint(); - payload["capacity"] = to_string(a_request.capacity()); + payload["capacity"] = std::to_string(a_request.capacity()); + + // Helper to add optional fields if present + auto add_if_present = [&](auto has_fn, auto get_fn, const std::string& key) { + if ((a_request.*has_fn)()) { + payload[key] = (a_request.*get_fn)(); + } + }; + + // List of optional fields to check + add_if_present(&Auth::RepoCreateRequest::has_path, &Auth::RepoCreateRequest::path, "path"); + 
add_if_present(&Auth::RepoCreateRequest::has_pub_key, &Auth::RepoCreateRequest::pub_key, "pub_key"); + add_if_present(&Auth::RepoCreateRequest::has_address, &Auth::RepoCreateRequest::address, "address"); + add_if_present(&Auth::RepoCreateRequest::has_endpoint, &Auth::RepoCreateRequest::endpoint, "endpoint"); + add_if_present(&Auth::RepoCreateRequest::has_desc, &Auth::RepoCreateRequest::desc, "desc"); + add_if_present(&Auth::RepoCreateRequest::has_domain, &Auth::RepoCreateRequest::domain, "domain"); + add_if_present(&Auth::RepoCreateRequest::has_exp_path, &Auth::RepoCreateRequest::exp_path, "exp_path"); - if (a_request.has_desc()) { - payload["desc"] = a_request.desc(); - } - if (a_request.has_domain()) { - payload["domain"] = a_request.domain(); - } - if (a_request.has_exp_path()) { - payload["exp_path"] = a_request.exp_path(); - } if (a_request.admin_size() > 0) { nlohmann::json admins = nlohmann::json::array(); for (int i = 0; i < a_request.admin_size(); ++i) { @@ -3656,69 +3678,97 @@ void DatabaseAPI::taskPurge(uint32_t a_age_sec, LogContext log_context) { } */ +/** + * @brief Converts client metrics into a structured JSON string. + * + * This method takes a timestamp, a total count, and a nested map of client + * metrics, and produces a JSON string with the following structure: + * + * - `timestamp`: the timestamp provided. + * - `total`: the total count provided. + * - `uids` (or clients): a JSON object where each key is a client ID and the + * value is an object containing: + * - `tot`: subtotal for the client (from message type 0, if present) + * - `msg`: an object mapping message types (as strings) to counts. + * + * @param a_timestamp The timestamp to include in the JSON payload. + * @param a_total The total count across all clients (sum of all subtotals). + * @param a_metrics A map where: + * - The first key (`std::string`) is the client ID. + * - The nested map (`std::map`) maps: + * - Key: message type (uint16_t), where `0` is reserved for subtotal. 
+ * - Value: count of messages for that type, at key `0` it is the + * subtotal. + * - If a client has no metrics, the nested map will be empty. + * - Message type `0` is used as a subtotal and is never a valid message type. + * + * @return A formatted JSON string representing the clients and their message + * counts. + * + * @note The JSON structure will always include the `"uids"` object, even if + * empty. + * @note Message type keys in `"msg"` are converted to strings to ensure valid + * JSON objects. + * + * In the below example total is equivalent to subtotal because there is only + * one client. + * + * @example + * Input: + * @code + * timestamp = 111 + * total = 15 + * metrics = { + * {"client1", {{0, 15}, {1, 10}, {2, 5}}} + * } + * @endcode + * + * Output JSON: + * @code + * { + * "timestamp": 111, + * "total": 15, + * "uids": { + * "client1": { + * "tot": 15, + * "msg": { + * "1": 10, + * "2": 5 + * } + * } + * } + * } + * @endcode + */ std::string DatabaseAPI::newJsonMetricParse( uint32_t a_timestamp, uint32_t a_total, const std::map> &a_metrics) { - map>::const_iterator u; - map::const_iterator m; + nlohmann::json payload; - payload["timestamp"] = to_string(a_timestamp); - payload["total"] = to_string(a_total); - - nlohmann::json uids; - for (u = a_metrics.begin(); u != a_metrics.end(); ++u) { - nlohmann::json uid_body; - uid_body["tot"] = to_string(u->second.at(0)); - nlohmann::json uid_msg; - for (m = u->second.begin(); m != u->second.end(); ++m) { - if (m->first != 0) { - uid_msg[to_string(m->first)] = to_string(m->second); - } - } - uid_body["msg"] = uid_msg; + payload["timestamp"] = a_timestamp; + payload["total"] = a_total; - uids[u->first] = uid_body; - } + nlohmann::json clients_json = nlohmann::json::object(); - payload["uids"] = uids; - string body = payload.dump(-1, ' ', true); - return body; -} + for (const auto &[client_id, client_metrics] : a_metrics) { + nlohmann::json client_json; -// TODO: verify and remove -std::string 
DatabaseAPI::oldJsonMetricParse( - uint32_t a_timestamp, uint32_t a_total, - const std::map> &a_metrics) { - map>::const_iterator u; - map::const_iterator m; - string body = "{\"timestamp\":" + to_string(a_timestamp) + - ",\"total\":" + to_string(a_total) + ",\"uids\":{"; - bool c = false, cc; - - for (u = a_metrics.begin(); u != a_metrics.end(); ++u) { - if (c) - body += ","; - else - c = true; - - body += "\"" + u->first + "\":{\"tot\":" + to_string(u->second.at(0)) + - ",\"msg\":{"; - - for (cc = false, m = u->second.begin(); m != u->second.end(); ++m) { - if (m->first != 0) { - if (cc) - body += ","; - else - cc = true; - - body += "\"" + to_string(m->first) + "\":" + to_string(m->second); - } + // Use safe access for total + auto it = client_metrics.find(0); + client_json["tot"] = (it != client_metrics.end()) ? it->second : 0; + + nlohmann::json msg_json; + for (const auto &[msg_id, msg_count] : client_metrics) { + if (msg_id != 0) + msg_json[std::to_string(msg_id)] = msg_count; } - body += "}}"; + + client_json["msg"] = msg_json; + clients_json[client_id] = client_json; } - body += "}}"; - return body; + payload["uids"] = clients_json; + return payload.dump(-1, ' ', true); } void DatabaseAPI::metricsUpdateMsgCounts( @@ -3726,21 +3776,7 @@ void DatabaseAPI::metricsUpdateMsgCounts( const std::map> &a_metrics, LogContext log_context) { - string body; - string new_body = newJsonMetricParse(a_timestamp, a_total, a_metrics); - string old_body = oldJsonMetricParse(a_timestamp, a_total, a_metrics); - - if (new_body == old_body) { - // on match use safer serialization - body = new_body; - } else { - body = old_body; - DL_WARNING( - log_context, - "Serialized metric bodies did not match, new serialization yielded:\n" - << new_body << "\n old serialization yielded:\n" - << old_body); - } + std::string body = newJsonMetricParse(a_timestamp, a_total, a_metrics); libjson::Value result; diff --git a/core/server/DatabaseAPI.hpp b/core/server/DatabaseAPI.hpp index 
ce5e0a649..bf94ce90b 100644 --- a/core/server/DatabaseAPI.hpp +++ b/core/server/DatabaseAPI.hpp @@ -333,7 +333,7 @@ class DatabaseAPI { LogContext); void metricsPurge(uint32_t a_timestamp, LogContext); -private: +protected: long dbGet(const char *a_url_path, const std::vector> &a_params, libjson::Value &a_result, LogContext, bool a_log = true); diff --git a/core/server/tests/unit/CMakeLists.txt b/core/server/tests/unit/CMakeLists.txt index f42b95ef9..054d0c5f9 100644 --- a/core/server/tests/unit/CMakeLists.txt +++ b/core/server/tests/unit/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(PROG test_AuthMap test_AuthenticationManager + test_DatabaseAPI ) file(GLOB ${PROG}_SOURCES ${PROG}*.cpp) diff --git a/core/server/tests/unit/test_DatabaseAPI.cpp b/core/server/tests/unit/test_DatabaseAPI.cpp new file mode 100644 index 000000000..23812af2b --- /dev/null +++ b/core/server/tests/unit/test_DatabaseAPI.cpp @@ -0,0 +1,215 @@ +#define BOOST_TEST_MAIN + +#define BOOST_TEST_MODULE authmap + +// Local includes +#include "DatabaseAPI.hpp" + +// Third party includes +#include +#include +#include +#include +#include +#include + +// Standard includes +#include +#include +#include + +using namespace SDMS::Core; + +class DatabaseAPITestHelper : public DatabaseAPI { +public: + // Inherit constructor(s) from DatabaseAPI + using DatabaseAPI::DatabaseAPI; + + // Or you can just use protected methods directly in your own logic + std::string newJsonMetricParse( + uint32_t ts, uint32_t total, + const std::map> &metrics) { + return DatabaseAPI::newJsonMetricParse(ts, total, + metrics); // if it's protected + } +}; + +struct CurlGlobalFixture { + CurlGlobalFixture() { curl_global_init(CURL_GLOBAL_DEFAULT); } + + ~CurlGlobalFixture() { curl_global_cleanup(); } +}; + +// Register fixture to run once per test module +BOOST_TEST_GLOBAL_CONFIGURATION(CurlGlobalFixture); + +const std::string url("https://localhost:8529"); +const std::string user("bob"); +const std::string pass("open_sesame"); + 
+BOOST_AUTO_TEST_SUITE(DatabaseAPITest) + +/** + * @brief Unit test for DatabaseAPI::newJsonMetricParse() with empty metrics. + * + * This test verifies that the `newJsonMetricParse` function correctly handles + * the case where the metrics map is empty. + * + * Test details: + * - The `metrics` map is empty (no users or messages). + * - The function is called with a timestamp and total value. + * - The test asserts that the JSON output contains the correct timestamp, + * total, and an empty "uids" object. + * + * Example of expected JSON structure: + * @code + * { + * "timestamp": 123456, + * "total": 42, + * "uids": {} + * } + * @endcode + * + * Notes: + * - Ensures that `newJsonMetricParse` does not fail or throw exceptions + * when no user metrics are provided. + * - Uses DatabaseAPITestHelper to invoke the function. + * - Uses nlohmann::json to build and compare the expected JSON output. + */ +BOOST_AUTO_TEST_CASE(testing_empty_metrics) { + + DatabaseAPITestHelper db_api(url, user, pass); + + std::map> metrics; + + std::string result = db_api.newJsonMetricParse(123456, 42, metrics); + + std::cout << "Result is " << std::endl; + std::cout << result << std::endl; + + nlohmann::json expected; + expected["timestamp"] = 123456; + expected["total"] = 42; + expected["uids"] = nlohmann::json::object(); + + std::cout << "expected dump" << std::endl; + std::cout << expected.dump(-1, ' ', true) << std::endl; + BOOST_TEST(result == expected.dump(-1, ' ', true)); +} + +/** + * @brief Unit test for DatabaseAPI::newJsonMetricParse() with a single user. + * + * This test verifies that the `newJsonMetricParse` function correctly converts + * a metrics map containing a single user into the expected JSON format. + * + * Test details: + * - One user is included: "user1". + * - The user has a "tot" value (total) and a "msg" object containing + * message IDs mapped to counts. + * - The total across all messages is passed as `total` to the function. 
+ * - The test asserts that the JSON output matches the expected structure. + * + * Example of expected JSON structure: + * @code + * { + * "timestamp": 111, + * "total": 5, + * "uids": { + * "user1": { + * "tot": 15, + * "msg": { + * "1": 10, + * "2": 5 + * } + * } + * } + * } + * @endcode + * + * Notes: + * - Keys inside the "msg" object must be strings to avoid being interpreted + * as JSON arrays by nlohmann::json. + * - Uses DatabaseAPITestHelper to invoke `newJsonMetricParse`. + * - Uses nlohmann::json to build and compare expected JSON output. + */ +BOOST_AUTO_TEST_CASE(testing_single_uid_with_tot_and_msg) { + + DatabaseAPITestHelper db_api(url, user, pass); + + std::map> metrics = { + {"user1", {{0, 15}, {1, 10}, {2, 5}}}}; + + std::string result = db_api.newJsonMetricParse(111, 5, metrics); + + nlohmann::json expected; + expected["timestamp"] = 111; + expected["total"] = 5; + nlohmann::json uids; + // NOTE keys must be strings + uids["user1"] = {{"tot", 15}, {"msg", {{"1", 10}, {"2", 5}}}}; + expected["uids"] = uids; + + std::cout << "result" << std::endl; + std::cout << result << std::endl; + + std::cout << "expected" << std::endl; + std::cout << expected.dump(-1, ' ', true) << std::endl; + BOOST_TEST(result == expected.dump(-1, ' ', true)); +} + +/** + * @brief Unit test for DatabaseAPI::newJsonMetricParse() with multiple users. + * + * This test verifies that the `newJsonMetricParse` function correctly converts + * a metrics map containing multiple users into the expected JSON format. + * + * Test details: + * - Two users are included: "userA" and "userB". + * - Each user has a "tot" value (total) and a "msg" object containing + * message IDs mapped to counts. + * - The total across all users is passed as `total` to the function. + * - The test asserts that the JSON output matches the expected structure. 
+ * + * Example of expected JSON structure: + * @code + * { + * "timestamp": 999, + * "total": 351, + * "uids": { + * "userA": { "tot": 201, "msg": {"1": 200, "4": 1} }, + * "userB": { "tot": 150, "msg": {"2": 150} } + * } + * } + * @endcode + * + * Uses: + * - DatabaseAPITestHelper for invoking `newJsonMetricParse`. + * - nlohmann::json for building and comparing expected JSON output. + */ +BOOST_AUTO_TEST_CASE(testing_multiple_uids) { + DatabaseAPITestHelper db_api(url, user, pass); + + std::map> metrics = { + {"userA", {{0, 201}, {1, 200}, {4, 1}}}, {"userB", {{0, 150}, {2, 150}}}}; + + std::string result = db_api.newJsonMetricParse(999, 351, metrics); + + nlohmann::json expected; + expected["timestamp"] = 999; + expected["total"] = 351; + nlohmann::json uids; + uids["userA"] = {{"tot", 201}, {"msg", {{"1", 200}, {"4", 1}}}}; + uids["userB"] = {{"tot", 150}, {"msg", {{"2", 150}}}}; + expected["uids"] = uids; + + std::cout << "result" << std::endl; + std::cout << result << std::endl; + + std::cout << "expected" << std::endl; + std::cout << expected.dump(-1, ' ', true) << std::endl; + + BOOST_TEST(result == expected.dump(-1, ' ', true)); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/doc_source/source/admin/install_bare_metal.rst b/doc_source/source/admin/install_bare_metal.rst index 44171394d..554015be9 100644 --- a/doc_source/source/admin/install_bare_metal.rst +++ b/doc_source/source/admin/install_bare_metal.rst @@ -41,6 +41,8 @@ Prior to building DataFed, the build environment must be properly configured as Downloading DataFed:: git clone https://github.com/ORNL/DataFed.git + cd DataFed + git submodule update --init --recursive Install packages required to build DataFed: @@ -50,7 +52,7 @@ Install packages required to build DataFed: * libboost-all-dev * protobuf-compiler * libzmq3-dev -* libssl-dev +* libssl-dev (version 3.0 or higher) * libcurl4-openssl-dev * libglobus-common-dev * libfuse-dev @@ -71,9 +73,9 @@ The npm packages needed primarily by the web 
server are: This can be done with a helper scripts these scripts are for ubuntu:: - ./DataFed/scripts/install_core_dependencies.sh - ./DataFed/scripts/install_repo_dependencies.sh - ./DataFed/scripts/install_ws_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_core_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_repo_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_ws_dependencies.sh The next step is to enter configuration options that are listed in ./config/datafed.sh. To generate a template for this file you will first need to run:: @@ -86,7 +88,6 @@ of the configuration options: 1. DATAFED_DEFAULT_LOG_PATH - Needed by core, repo, web services 2. DATAFED_DATABASE_PASSWORD - Needed by core 3. DATAFED_ZEROMQ_SESSION_SECRET - Needed by web server -4. DATAFED_ZEROMQ_SYSTEM_SECRET - Needed by web server 5. DATAFED_LEGO_EMAIL - Needed by web server 6. DATAFED_WEB_KEY_PATH - Needed by web server 7. DATAFED_WEB_CERT_PATH - Needed by web server @@ -141,8 +142,8 @@ Example download/install of ArangoDB 3.12.4 for Ubuntu:: sudo apt-get update sudo apt-get install arangodb3 -It should start automatically with an install but to run the arangodb service, you -can also interact with it via systemctl:: +It should start automatically with an install but to run the arangodb service, +you can also interact with it via systemctl:: sudo systemctl start arangodb3.service @@ -181,7 +182,8 @@ Building the compiling the core service:: cmake --build build --parallel 6 sudo cmake --build build --target install -Example datafed-core.cfg file:: +Example datafed-core.cfg file, note you will need to swap http for https in the +db-url if the Arango database is running with ssl.:: port = 9100 client-threads = 4 diff --git a/doc_source/source/admin/install_docker.rst b/doc_source/source/admin/install_docker.rst index 0fddb6a30..3c8c4c339 100644 --- a/doc_source/source/admin/install_docker.rst +++ 
b/doc_source/source/admin/install_docker.rst @@ -53,6 +53,8 @@ Downloading DataFed: .. code-block:: bash git clone https://github.com/ORNL/DataFed.git + cd DataFed + git submodule update --init --recursive If you are deploying in development mode, the next step is to enter configuration options that are listed in ./.env To generate a template for this file you will first need to run: @@ -67,7 +69,6 @@ of the relevant configuration options to an initial deployment: 1. DATAFED_GLOBUS_APP_SECRET 2. DATAFED_GLOBUS_APP_ID 3. DATAFED_ZEROMQ_SESSION_SECRET -4. DATAFED_ZEROMQ_SYSTEM_SECRET 5. DATAFED_DATABASE_PASSWORD 6. DATAFED_DATABASE_IP_ADDRESS 7. DATAFED_GCS_ROOT_NAME @@ -143,7 +144,6 @@ Here is an example for the core service: -e DATAFED_GLOBUS_APP_SECRET="" \ -e DATAFED_GLOBUS_APP_ID="" \ -e DATAFED_ZEROMQ_SESSION_SECRET="" \ - -e DATAFED_ZEROMQ_SYSTEM_SECRET="" \ -e DATAFED_DOMAIN="" \ -e DATAFED_DATABASE_PASSWORD="" \ -e DATAFED_DATABASE_IP_ADDRESS_PORT="" \ @@ -158,6 +158,16 @@ Here is an example for the core service: -v "/local/path/keys/datafed-core-key.priv:/opt/datafed/keys/datafed-core-key.priv" \ -t "datafed-core:latest" +If arango has been built with self signed certificates and the core service needs to communicate +with it, the following additional environment variables need to be added. 
+
+```
+# An env variable to point to the certificate
+ -e SSL_CERT_FILE="/opt/datafed/keys/datafed-arango.crt"
+# A volume mount that actually mounts the .crt file
+ -v "/local/path/keys/datafed-arango.crt:/opt/datafed/keys/datafed-arango.crt"
+```
+
 Web Service
 ------------
 
@@ -171,21 +181,20 @@ Here is an example for the web service:
       -e DATAFED_GLOBUS_APP_SECRET="" \
       -e DATAFED_GLOBUS_APP_ID="" \
       -e DATAFED_ZEROMQ_SESSION_SECRET="" \
-      -e DATAFED_ZEROMQ_SYSTEM_SECRET="" \
       -e DATAFED_DOMAIN="" \
-      -e DATAFED_WEB_CERT_PATH="" \
-      -e DATAFED_WEB_KEY_PATH="" \
+      -e DATAFED_WEB_CERT_PATH="" \
+      -e DATAFED_WEB_KEY_PATH="" \
       -e DATAFED_DEFAULT_LOG_PATH="" \
       -e DATAFED_CORE_ADDRESS_PORT_INTERNAL="" \
-      -e DATAFED_GOOGLE_ANALYTICS_TAG="" \
+      -e DATAFED_GOOGLE_ANALYTICS_TAG="" \
       -e UID="" \
       --network datafed-network \
       -p 7513:7513 \
       -p 7512:7512 \
       -v "/local/path/logs:/datafed/logs" \
       -v "/local/path/keys/datafed-core-key.pub:/opt/datafed/keys/datafed-core-key.pub" \
-      -v "$DATAFED_WEB_CERT_PATH:$DATAFED_WEB_CERT_PATH" \
-      -v "$DATAFED_WEB_KEY_PATH:$DATAFED_WEB_KEY_PATH" \
+      -v "$DATAFED_WEB_CERT_PATH:$DATAFED_WEB_CERT_PATH" \
+      -v "$DATAFED_WEB_KEY_PATH:$DATAFED_WEB_KEY_PATH" \
       -t "datafed-web:latest"
 
 Repository Service
 ------------------
@@ -201,8 +210,7 @@ Here is an example for the repository service:
       -e DATAFED_GLOBUS_APP_SECRET="" \
       -e DATAFED_GLOBUS_APP_ID="" \
       -e DATAFED_ZEROMQ_SESSION_SECRET="" \
-      -e DATAFED_ZEROMQ_SYSTEM_SECRET="" \
-      -e DATAFED_HTTPS_SERVER_PORT="" \
+      -e DATAFED_HTTPS_SERVER_PORT="" \
       -e DATAFED_DOMAIN="" \
       -e DATAFED_DEFAULT_LOG_PATH="" \
       -e DATAFED_CORE_ADDRESS_PORT_INTERNAL="" \
@@ -231,7 +239,6 @@ Here is an example for the Globus Connect Server service:
       -e DATAFED_GLOBUS_APP_SECRET="" \
       -e DATAFED_GLOBUS_APP_ID="" \
       -e DATAFED_ZEROMQ_SESSION_SECRET="" \
-      -e DATAFED_ZEROMQ_SYSTEM_SECRET="" \
       -e DATAFED_HTTPS_SERVER_PORT="" \
       -e DATAFED_DOMAIN="" \
       -e DATAFED_CORE_ADDRESS_PORT_INTERNAL="" \
@@ -258,6 +265,23 @@ Here is an example for the Globus Connect
Server service: Notice that the gcs container must run in host networking mode to avoid performance bottlenecks with GridFTP. +ArangoDB +------------- + +Notes on running arango in a container can be found on their official web site. I will only include here that if running the arango with ssl enabled the certificate must be passed in. + +.. code-block:: bash + + docker run -d \ + --restart=always \ + --network datafed-network \ + -p 8529:8529 \ + -v "/opt/datafed/keys/datafed-arango.pem:/usr/local/share/ca-certificates/datafed-arango.pem:ro" \ + -e ARANGO_ROOT_PASSWORD="" \ + arangodb/arangodb:latest \ + --ssl.keyfile /usr/local/share/ca-certificates/datafed-arango.pem \ + --server.endpoint ssl://0.0.0.0:8529 + Nginx Service ------------- @@ -286,4 +310,4 @@ Networking If the web server and core server are on different machines you will need to ensure that they can communicate, this will require exchanging the public keys -that are in the /opt/datafed/keys folder. \ No newline at end of file +that are in the /opt/datafed/keys folder. diff --git a/doc_source/source/dev/design.rst b/doc_source/source/dev/design.rst index 81adca6fe..373d1d406 100644 --- a/doc_source/source/dev/design.rst +++ b/doc_source/source/dev/design.rst @@ -2,4 +2,74 @@ Architecture & Design ===================== -DataFed is designed in part as a hub and spoke model. Where the metadata services act as the hub and the spokes are DataFed managed repositories that connect to it. +DataFed is built around a hub-and-spoke architecture. In this design, the +centralized metadata services act as the hub, while DataFed-managed repositories +function as the spokes that connect into the system. This architecture supports +flexible growth, distributed storage, and robust metadata management. + +Central Services (Hub) +====================== + +The core of DataFed consists of three primary services: the database layer, +the C++ core service, and the web server. 
+ +Database Layer (ArangoDB) +------------------------- + +At the center of the architecture is the metadata database, powered by +**ArangoDB**. ArangoDB is a multimodel database that supports key–value, +document-store, and graph data models. This combination makes it especially +well-suited for metadata-driven systems: + +- The **graph model** enables rich representation of provenance relationships, + dataset linkages, and hierarchical structures. +- The **document store** and **key–value** capabilities support nested metadata + documents, allowing flexible schemas and expressive, domain-specific queries. +- The multimodel nature of ArangoDB allows DataFed to unify structured, + semi-structured, and relational metadata within a single backend. + +C++ Core Service +---------------- + +The C++ core service forms the primary interface between the database and all +other components of the system. It is the only service with direct access to +the database. Its responsibilities include: + +- **Message relay and coordination:** It brokers communication between external + systems—such as repository services, Python clients, and the web server. +- **Task management:** It manages long-running background operations, including + Globus data transfers and other asynchronous workflows. +- **Schema validation:** It executes JSON Schema validation to ensure that all + metadata ingested into the system conforms to the expected structure. + +Web Server +---------- + +The web server provides browser-based access to DataFed. It exposes the user +interface and API endpoints and can be deployed behind NGINX or any other +reverse proxy for additional security, load balancing, or TLS termination. + +Repository Services (Spokes) +============================ + +The spokes in the hub-and-spoke model are the DataFed repositories that provide +the actual data storage capabilities. Currently, these repositories rely on the +**Globus** platform. 
+
+A typical repository consists of:
+
+- **Globus Connect Server (GCS):**
+  A customized GCS deployment that incorporates a DataFed authorization (authz)
+  callout module. This module integrates with the GridFTP server and routes all
+  access-control decisions back to the C++ core service, ensuring centralized
+  authorization.
+
+- **DataFed Repository Service:**
+  A companion service that runs alongside the Globus Connect Server. It handles
+  additional repository-specific responsibilities, including data preparation,
+  repository state management, and supporting repository workflows.
+
+Together, the central services and distributed repositories form a cohesive,
+scalable system for managing scientific metadata and data movement across
+institutions and storage environments.
+
diff --git a/doc_source/source/dev/project.rst b/doc_source/source/dev/project.rst
index 844480249..7d37db1c4 100644
--- a/doc_source/source/dev/project.rst
+++ b/doc_source/source/dev/project.rst
@@ -4,17 +4,23 @@ Project Management
 DataFed is an open-source project hosted on `GitHub `_.
 Development is on-going and the development team is based at `Oak Ridge National
 Laboratories `_ (ORNL)
-within the `Advanced Technologies Section `_ (ATS)
+within the `HPC Operations Section `_
 of the `Oak Ridge Leadership Computing Facility `_ (OLCF).
 
-Personnel
-=========
+Active Team
+===========
+
+- Josh Brown, Technical Applications lead for Data and Platforms Services
+- Blake Nedved, Software developer and lead developer of DataFed
+- Austin Hampton, Intern/Junior Software engineer on the DataFed team
+- Polina Shpilker, Postdoctoral researcher and contributor
+- Tony Ramirez, Software Developer and user assistance team
+
+Acknowledgement to Contributors of the Project and their Roles
+==============================================================
 
 - Olga Kuchar, Data Lifecycle and Scalable Workflows group leader within ATS.
 - Dale Stansberry, Sr.
Software Developer - PI, architect, lead developer of the DataFed Project - Suhas Somnath, Computer Scientist - Scientific user requirements and Python interface development - Jessica Breet, Data Scientist - Scientific user requirements and system testing -- Josh Brown, Software Engineer - Aaron Perez, Full Stack -- Blake, Nedved, Sotware Developer -- Tony, Ramirez, Software Developer diff --git a/doc_source/source/dev/release.rst b/doc_source/source/dev/release.rst index 0153c7c0d..84166f58d 100644 --- a/doc_source/source/dev/release.rst +++ b/doc_source/source/dev/release.rst @@ -2,6 +2,11 @@ Release Notes ============= +For release notes, for dates after May 28, 2021 please refer to what is posted +on the github website. + +https://github.com/ORNL/DataFed/releases + 1.2.0-4, May 28, 2021 ===================== diff --git a/doc_source/source/dev/roadmap.rst b/doc_source/source/dev/roadmap.rst index 5bb39604d..b67e70fc7 100644 --- a/doc_source/source/dev/roadmap.rst +++ b/doc_source/source/dev/roadmap.rst @@ -1,69 +1,17 @@ -================ -Feature Road Map -================ - -Planned Features -================ - -- Metadata Schema Support (#33) -- Full Documentation (#594) - -New User Features +================= + Feature Roadmap ================= -- Improved Data Search Interface (#596) -- Provenance-Based Relational Data Search (#597) -- Data Events, Notifications, & Subscriptions (#401) -- Multimedia Attachments for Data Records (#12) -- HTTPS Data Transfer (#571) -- Organization and Project Directory (#35) - - add orgs/facilities, affiliated users & projects - - contacts and repo admin requests - - sci network geo map of orgs/facilities -- Provide REST API as alternative to Python API (#595) -- Full-Featured Python HL-API (#598) -- Full-Featured CLI (#599) -- System Administration Interfaces (#600) - -Production-Related Changes +Planned Features — FY 2026 ========================== -- Database Scaling / Resilience - - cluster configuration with replication - 
- mirror record metadata files on repositories - - automatic DB back-ups -- Core Service Scaling / Resilience - - switch comms from ZeroMQ to TCP/TLS - - add core service load balancer - - update task management to operate non-exclusively - - support dynamic repositories (add/remove) -- Web Service Scaling / Resilience - - web server farm w/ load balancer -- Repository Service Updates - - object stores - - policy enforcement -- Update to Globus Connect Ver. 5 - - impacts repository endpoint configuration & authentication - - required for HTTP data access -- Rebuild Current Web Portal Using a Modern Web Framework +- Metadata schema support (LinkML) (https://github.com/ORNL/DataFed/milestone/24) +- Metadata-only repository mode (https://github.com/ORNL/DataFed/milestone/19) +- NIST and ORNL compliance, including: + - OAuth flow support, token support and customization of OIDC provider (https://github.com/ORNL/DataFed/issues/1578) + - Login banners (https://github.com/ORNL/DataFed/milestone/27) + - Improved audit logging (https://github.com/ORNL/DataFed/milestone/18) +- Kubernetes-compatible containers (https://github.com/ORNL/DataFed/milestone/25) +- Migration from ZeroMQ to HTTPS for external communication (https://github.com/ORNL/DataFed/milestone/26) -Potential Features / Changes -============================ -- Data Communities / Groups - - like google groups, share data with members, notifications -- Integration with Data Publishing Systems - - users can easily publish from DataFed - - data can be retained in datafed, or linked - - datafed APIs can be used to get published data - - data events would work on published data -- Client Data Ingest Tools - - metadata extraction - - tar/zip directories - - batch import directories - - synchronize managed directories -- Revisit Peer-to-Peer Data Indexing Architecture - - currently requires central index - - can be robust, but still single point of failure (think google) - - remote searches would be very slow - - 
mirroring peer data would require very large synchronized indexes
diff --git a/doc_source/source/dev/testing.rst b/doc_source/source/dev/testing.rst
index b2e026831..68b8af55a 100644
--- a/doc_source/source/dev/testing.rst
+++ b/doc_source/source/dev/testing.rst
@@ -60,13 +60,12 @@ Assuming you're at the root of the DataFed repository:
 Now, run the Foxx container with the appropriate environment variables to
 install the services and enable testing:
 
-DATAFED_DATABASE_PASSWORD, and DATAFED_ZEROMQ_SYSTEM_SECRET can be anything but they should be consistent with what is in the config/datafed.sh file.
+DATAFED_DATABASE_PASSWORD can be anything but it should be consistent with what is in the config/datafed.sh file.
 
 .. code-block:: bash
 
    docker run \
      -e DATAFED_DATABASE_PASSWORD="" \
-     -e DATAFED_ZEROMQ_SYSTEM_SECRET="" \
      -e INSTALL_FOXX=ON \
      -e ENABLE_FOXX_TESTS=TRUE \
      --user $(id -u):0 \
diff --git a/doc_source/source/system/getting_started.rst b/doc_source/source/system/getting_started.rst
index ca154cfc0..2409f6548 100644
--- a/doc_source/source/system/getting_started.rst
+++ b/doc_source/source/system/getting_started.rst
@@ -2,61 +2,61 @@
 Getting Started
 ===============
 
-Please follow this guide to get started with DataFed
+Please follow this guide to get started with DataFed.
 
 1. Get a Globus account
 -----------------------
-Follow **only step 1** of `instructions here `_ to get a Globus account.
+Follow **only Step 1** of the `instructions here `_ to create a Globus account.
 
 2. Get a Globus ID
 ------------------
-Ensure that your ``globus ID`` is linked with your institutional ID in your globus account:
+Ensure that your ``Globus ID`` is linked with your institutional ID in your Globus account:
 
-1. Log into `globus.org `_
-2. Click on ``Account`` on the left hand pane
-3. Select the ``Identities`` tab in the window that opens up
-4. You should see (at least these) two identities:
+1. Log in to `globus.org `_.
+2. Click on ``Account`` on the left-hand pane.
+3.
Select the ``Identities`` tab in the window that opens. +4. You should see (at least) two identities: - a. One from your home institution (that is listed as ``primary`` with a crown) - b. Globus ID (your_username@globusid.org) + a. One from your home institution (listed as ``primary`` with a crown). + b. A Globus ID (your_username@globusid.org) -e. If you do not see the ``Globus ID``, click on ``Link another identity``. Select ``Globus ID`` and link this ID. +5. If you do not see the ``Globus ID``, click on ``Link another identity``. Select ``Globus ID`` and link this identity. 3. Register at DataFed ---------------------- 1. Once you have a Globus ID, visit the `DataFed web portal `_. -2. Click on the ``Log in / Register`` button on the top right of the page. -3. Follow the steps to register yourself with DataFed. -4. Though you can log into the DataFed web portal with your institution's credentials, - you will need the username and password you set up during your registration for scripting. +2. Click on the ``Log in / Register`` button on the top-right of the page. +3. Follow the steps to register with DataFed. +4. Although you can log in to the DataFed web portal using your institutional credentials, + you will need the username and password you set up during registration for scripting. .. note:: Your institutional credentials are not the same as your DataFed credentials. - The latter is only required for using DataFed via python / CLI. + The latter are required only when using DataFed via the Python API or CLI. 4. Get data allocations ----------------------- -As the name suggests, a data allocation is just the data storage space that users and projects can use to store and share data of their own. -Though you can start to use DataFed at this point to view and get publicly shared data, it would not be possible to create or manipulate data of your own -unless you have a data allocation in a DataFed data repository. 
+As the name suggests, a data allocation is the data storage space that users and projects can use to store and share their own data. +Although you can use DataFed at this point to view and access publicly shared data, it is not possible to create or manipulate your own data +unless you have a data allocation on a DataFed data repository. -Users are recommended to request an allocation from the principle investigator of the project and/or the IT administrator of the facility using DataFed. -Make sure to communicate your DataFed user ID with administrators or collaborators so that you can be added to projects, be provided data allocations, etc. +Users are encouraged to request an allocation from the **principal investigator** of the project and/or the IT administrator of the facility using DataFed. +Make sure to communicate your DataFed user ID to administrators or collaborators so you can be added to projects, provided data allocations, etc. -.. note :: +.. note:: - The completion of these steps so far should be sufficient for users to view, edit, and manage data on DataFed. - In order to upload and download data, users are recommended to complete the next few steps. + Completing the steps so far is sufficient for users to view, edit, and manage metadata on DataFed. + However, to upload and download data, users are recommended to complete the next few steps. 5. Install / identify Globus Endpoint ------------------------------------- -You will need a `Globus endpoint `_ on **every machine** where you intend to download / upload data. +You will need a `Globus endpoint `_ on **every machine** where you intend to download or upload data. -High Performance Compute clusters +High-performance compute clusters ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Most high performance computing clusters will already have at-least one endpoint already configured. 
See the table below for some popular examples: +Most high-performance computing clusters will already have at least one endpoint configured. See the table below for some popular examples: +----------+---------------------------------+-------------------------------------------------------------------------------------------+ | Facility | Machine(s) | Endpoint | @@ -74,63 +74,65 @@ Most high performance computing clusters will already have at-least one endpoint | CADES | CADES Moderate Research | `cades#CADES-MOD `_ | +----------+---------------------------------+-------------------------------------------------------------------------------------------+ -If your cluster is not listed above, you may need to identify the endpoint(s) from the cluster's documentation or -by searching on the Globus data transfer web interface as shown below: +If your cluster is not listed above, you may need to identify the endpoint from the cluster’s documentation or +by searching on the Globus Data Transfer web interface: -1. Log into Globus' `web portal `_ -2. Select ``File Manager`` on the left hand pane if it is not already selected. +1. Log in to the Globus `web portal `_. +2. Select ``File Manager`` on the left-hand pane if it is not already selected. .. image:: ../_static/globus_endpoints/finding_endpoint_01.png -3. Start typing the name of the machine or compute facility in the search box + +3. Start typing the name of the machine or compute facility in the search box. .. image:: ../_static/globus_endpoints/finding_endpoint_02.png -4. Select the option that seems most reasonable (avoid endpoints named as ``test`` or those that seem project specific) -5. If the endpoint name is not clearly listed in the short description, click on the three dots signifying the options to view details about the endpoint -6. Scroll down the page till you find ``Legacy name``. This is the short-hand identifier for the endpoint + +4. 
Select the option that appears most appropriate (avoid endpoints named ``test`` or those that appear project-specific). +5. If the endpoint name is not clearly listed, click the three dots icon to view details about the endpoint. +6. Scroll down until you find ``Legacy Name`` — this is the short-hand identifier for the endpoint. .. image:: ../_static/globus_endpoints/finding_endpoint_03.png - The ``Endpoint UUID`` could also be used in place of the ``Legacy Name`` in the DataFed context. +The ``Endpoint UUID`` may also be used in place of the ``Legacy Name`` in the DataFed context. .. note:: - The DataFed web portal features a built-in endpoint search capability in the data upload and download dialogs. Simply enter a - portion of an endpoint title or legacy ID in the source or destination path input field, and after a short delay DataFed will show - any matching endpoints in the source or destination endpoint input list. + The DataFed web portal features a built-in endpoint search capability in the data upload and download dialogs. + Simply enter a portion of an endpoint title or legacy ID in the source or destination path input field, and DataFed will display + matching endpoints after a brief delay. -Personal Computers and Workstations +Personal computers and workstations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you plan on using DataFed to upload and download data from your own computer, -you will need to `follow these instructions `_ -to install Globus Connect Personal and set up your own Globus endpoint on your computer. +If you plan to use DataFed to upload or download data from your own computer, +follow the `instructions here `_ +to install Globus Connect Personal and set up a Globus endpoint on your system. - -Just as in the High performance computing clusters scenario, -you would need to look up information regarding your endpoint on Globus' web portal. -You would need to note down either the ``Endpoint UUID`` or the ``Legacy Name`` for your endpoint. 
+As with high-performance computing clusters, you will need to look up information about your endpoint on the Globus web portal. +You should note either the ``Endpoint UUID`` or the ``Legacy Name`` for your endpoint. 6. Activate Globus Endpoint --------------------------- -In order to transfer from or to the Globus endpoint attached to the cluster's file system or your personal computer, -you would need to activate the Globus endpoint identified in the previous step. -Just clicking on the endpoint in the Globus web portal will prompt you to authenticate yourself with the institution-specific credentials. -Here are `example instructions `_ for activating CADES' endpoints. +To transfer data to or from the Globus endpoint associated with a cluster’s file system or your personal computer, +you will need to activate the endpoint identified in the previous step. +Clicking on the endpoint in the Globus web portal will prompt you to authenticate using your institution-specific credentials. +Here are `example instructions `_ for activating CADES endpoints. .. note:: - Globus endpoints are active only for a short period of time and must be reactivated if they expire. - -Once the endpoint is activated, it will be active for data transfers for a period determined by the hosting facility - typically 2 or 3 days. -It is possible to renew an endpoints activation before it expires from the Globus `endpoint -management page `_. DataFed data repositories use internal Globus endpoints -for data uploads and downloads; however, these endpoints are managed by DataFed and do not need to be activated by users. + Globus endpoints remain active only for a limited time and must be reactivated when they expire. +Once activated, an endpoint remains available for transfers for a period determined by the hosting facility—typically 2 or 3 days. +It is possible to renew an endpoint’s activation before it expires from the Globus +`endpoint management page `_. 
+DataFed data repositories use internal Globus endpoints for uploads and downloads; however, these endpoints are managed by DataFed and do not require user activation. Programming interfaces to DataFed --------------------------------- -Though it is indeed possible to use the web interface exclusively for managing data in DataFed, -DataFed's python interface and CLI are very handy tools for automating data orchestration and accessing DataFed -when a web interface is not available (i.e. when using a terminal). +Although it is possible to use the web interface exclusively to manage your data, +DataFed’s Python interface and CLI are valuable tools for automating data orchestration and for accessing DataFed +when a web interface is not available (for example, when using a terminal). + +Please follow the +`installation and configuration guide `_ +for the client package to get started with the Python and command-line interfaces to DataFed. -Please follow the `installation and configuration guide `_ for the client package to get started with the python and command line interfaces to DataFed. diff --git a/doc_source/source/system/introduction.rst b/doc_source/source/system/introduction.rst index 97ddbc65e..30da03cd7 100644 --- a/doc_source/source/system/introduction.rst +++ b/doc_source/source/system/introduction.rst @@ -20,7 +20,7 @@ Background The primary goal of DataFed is to improve scientific data quality by enabling precise early-lifecycle control over data artifacts, with the ability to uniformly share and access data -across geographically distributed facilities. DataFed can be thought of as a "tier 2+" distributed data storage system - meaning it is intended +across geographically distributed facilities. DataFed can be thought of as a "tier 2+" distributed data storage system — meaning it is intended for creating and working with data that is of medium- to long-term significance to the owner and/or collaborators. Unlike a tier 1 storage system (i.e. 
a local file system), DataFed compromises raw data access performance in favor of FAIR data principles. @@ -29,27 +29,26 @@ While DataFed shares many features with tier 3 storage systems (i.e. data archival systems), DataFed allows data and metadata to be modified after it is ingested and specifically includes features for disseminating subsequent changes to downstream data consumers via automatic provenance-based alerts as well as opt-in data subscriptions. DataFed -also provides powerful and easy to use collaboration features to encourage "in-band" data-related +also provides powerful and easy-to-use collaboration features to encourage "in-band" data-related communication instead of ad hoc and error-prone methods, such as email. DataFed utilizes `Globus `_ for efficient and secure data transfers, as well as for user authentication. Globus can be thought of as a "data network" where data transfers take place -between Globus "endpoints" - which are Globus services that enable access to underlying file systems +between Globus "endpoints" — which are Globus services that enable access to underlying file systems hosted by member organizations. DataFed adds a data management layer on top of Globus that permits data to be located and accessed without needing to know where the data is physically stored within the Globus network. Because DataFed relies heavily on Globus, it is recommended that DataFed users familiarize themselves with `how Globus works `_. - Scientific Data Management ========================== Traditional scientific data management systems (SDMS) are restricted to individual organizations or a small number of organizations connected via a "virtual organization" (VO) configuration. These systems typically support domain-specific and predetermined data workflows that cannot be readily applied to other domains or applications. -On the other hand, data cataloging systems are typically single-site and provide access to static datasets. 
Also catalogs -usually only support HTTP data transfers, thus limiting the size of datasets that can be served. Neither -SDMSs nor cataloging systems can easily scale-out to accommodate large numbers of users across multiple organizations. +On the other hand, data cataloging systems are typically single-site and provide access to static datasets. Also, +catalogs usually only support HTTP data transfers, thus limiting the size of datasets that can be served. Neither +SDMSs nor cataloging systems can easily scale out to accommodate large numbers of users across multiple organizations. DataFed provides a combination of the features and benefits of both SDMSs and data cataloging services while also supporting big data. For example, DataFed provides storage and access to structured and unstructured @@ -65,12 +64,12 @@ Briefly, DataFed provides the following unique blend of capabilities and benefit - Encourages FAIR-principled data practices via user- and community-defined schemas. - Enhances data awareness with automatic notification of "data events". - Scales out and up to enable efficient big data research across organizations/facilities. -- Provides high-quality data management foundation for use by other applications and services. +- Provides a high-quality data management foundation for use by other applications and services. Data Lifecycle ============== -DataFed is an enabling technology for the early lifecycle stages of scientific data - from data ingest +DataFed is an enabling technology for the early lifecycle stages of scientific data — from data ingest to pre-publication. The figure below illustrates how DataFed supports the lifecycle stages: .. image:: /_static/data_lifecycle.png @@ -81,7 +80,7 @@ records. Pre-publication is supported by providing powerful data organization and data handling capabilities to help ensure that the right data is being published and that it contains proper metadata and provenance information. 
Note that data publishing systems may have additional metadata requirements that are not available from the data records themselves (i.e. contract numbers, sponsoring -organizations, etc.) +organizations, etc.). Why DataFed? ============ @@ -93,17 +92,17 @@ observational, compute, or analytics resources within the Department of Energy's **Domain Agnosticism** DataFed is **general purpose**. DataFed is domain- and methodology-neutral in that it does not require -users to utilize pre-defined data formats or processes - yet, despite this, DataFed provides powerful -domain-specific metadata indexing and query capabilities augmented by user/community defined schemas. +users to utilize predefined data formats or processes — yet, despite this, DataFed provides powerful +domain-specific metadata indexing and query capabilities augmented by user/community-defined schemas. **Working Data Support** -DataFed supports the **pre-publication data lifecycle** which is both critical and complex in -collaborative scientific research - yet is under served by existing data management systems. +DataFed supports the **pre-publication data lifecycle**, which is both critical and complex in +collaborative scientific research — yet is underserved by existing data management systems. **Scalability** -DataFed is **Scalable**. Datafed was designed to easily scale-out across multiple/many organizations +DataFed is **scalable**. DataFed was designed to easily scale out across multiple organizations and facilities by relying on federated identity technology and a common access control mechanism; however, individual organizations are still able to manage their own data storage resources and policies. In contrast, most existing SDMS products either cannot span organizations at all, or rely on virtual @@ -111,22 +110,18 @@ organization (VO) technologies that are highly labor-intensive to scale beyond a **Big Data Support** -DataFed **understands big data**. 
DataFed was design from the start to support "big data" and +DataFed **understands big data**. DataFed was designed from the start to support "big data" and the often complex environments in which such data is created and processed. Many existing SDMS products -rely on tightly-coupled file systems or HTTP/S for moving data; however, DataFed utilizes Globus (GridFTP) -for data transfer between facilities because it is the defacto standard for high performance movement -of very large data sets (Petabyte scale) between government-funded user facilities (DoE), research +rely on tightly coupled file systems or HTTP/S for moving data; however, DataFed utilizes Globus (GridFTP) +for data transfer between facilities because it is the de facto standard for high-performance movement +of very large datasets (petabyte scale) between government-funded user facilities (DOE), research universities, and commercial cloud computing services. **Foundation for Data Applications** -DataFed focuses on providing high quality, uniform, and easy-to-use data management services +DataFed focuses on providing high-quality, uniform, and easy-to-use data management services and does not overreach by bundling complementary features such as instrument control, workflow processing, or data analytics that are better served by dedicated application-specific tools. However, DataFed does provide application programming interfaces (APIs) to allow other services or applications -to be utilize DataFed's data management capabilities. - - - - +to utilize DataFed's data management capabilities. 
diff --git a/doc_source/source/system/overview.rst b/doc_source/source/system/overview.rst index 968354a62..aa1f34fe7 100644 --- a/doc_source/source/system/overview.rst +++ b/doc_source/source/system/overview.rst @@ -5,7 +5,7 @@ System Overview DataFed is a scientific data federation formed from a network of distributed services and data storage repositories that enable users to create, locate, share, and access working scientific data from any organization, facility, or workstation within the DataFed network. DataFed provides a software framework -for the federation of distributed raw data storage resources along with centralized metadata indexing, +for federating distributed raw data storage resources along with centralized metadata indexing, data discovery, and collaboration services that combine to form a virtual "data backplane" connecting otherwise disjoint systems into a uniform data environment. Conceptually, DataFed is a modern and domain- agnostic "data grid" application with a host of advanced data management and collaboration features @@ -14,20 +14,19 @@ aimed at the open science and HPC communities. DataFed features a robust and scalable centralized data indexing and orchestration service that ties potentially large numbers of independent DataFed data storage repositories together with high-performance data transfer protocols and federated identity technologies. This approach prevents the formation of -independent "data silos" that suppress data discovery and access from outside of specific host organizations -or domains - yet this architecture is scalable since data storage and transfer loading is distributed across -many independently managed data repositories. Currently, DataFed's central services are hosted within the -Oak Ridge Leadership Computing Facility (OLCF) at the Department of Energy's Oak Ridge National Laboratory -(ORNL). +independent "data silos" that suppress data discovery and access outside specific host organizations +or domains. 
At the same time, this architecture remains scalable because data storage and transfer loads +are distributed across many independently managed data repositories. Currently, DataFed's central services +are hosted within the Oak Ridge Leadership Computing Facility (OLCF) at the Department of Energy's Oak Ridge +National Laboratory (ORNL). DataFed presents managed data using a *logical* view (similar to a database) rather than a direct physical view of files in directories on a particular file system. Data that is managed by a DataFed repository is -maintained in system-controlled storage with no user-level file system access. This is to both protect the -managed data from inadvertent changes or deletions, and to ensure that all data read/write operations go -through a DataFed interface for proper system-wide coordination and access control. This approach is a step -towards unifying and simplifying data discovery, access, and sharing - as well as avoiding the inherent -entropy of traditional file systems that can lead to data misidentification, mishandling, and an eventual -loss of scientific reproducibility. +maintained in system-controlled storage with no user-level file system access. This protects managed data +from inadvertent changes or deletions and ensures that all data read/write operations go through a DataFed +interface for proper system-wide coordination and access control. This approach helps unify and simplify +data discovery, access, and sharing, while also avoiding the inherent entropy of traditional file systems +that can lead to data misidentification, mishandling, and eventual loss of scientific reproducibility. Cross-Facility Data Management ============================== @@ -35,39 +34,39 @@ Cross-Facility Data Management Figure 1, below, shows a simplified representation of an example DataFed network consisting of the central DataFed services and several connected facilities and DataFed repositories. 
The enclosing gray boxes represent the physical boundaries of geographically distributed facilities. The wide blue arrows represent -the DataFed high-speed raw data transfer "bus" (i.e. GridFTP) that is used to move data between facilities, -and the green arrows represent the DataFed communication "bus" use by clients to send requests to DataFed. +the DataFed high-speed raw data transfer "bus" (i.e., GridFTP) used to move data between facilities, +and the green arrows represent the DataFed communication "bus" used by clients to send requests to DataFed. -.. figure:: /_static/simplified_architecture.png +.. figure:: /_static/simplified_architecture.png :align: center Figure 1 - An Example DataFed Network -In this example, there is an observational facility and a compute facility that each have a local DataFed +In this example, an observational facility and a compute facility each have a local DataFed data repository (a cylinder labeled with an 'R'). Any facility in the system can read or write data from or to -the data repositories in the observational or compute facilities (assuming proper access permissions); however, -users within these two facilities will have lower latency access to the data stored there. In addition, -independent workstations can also access data in these repositories - also assuming proper access permissions +the data repositories in the observational or compute facilities (assuming proper access permissions). However, +users within these two facilities will experience lower-latency access to the data stored there. In addition, +independent workstations can also access data in these repositories, assuming proper access permissions are granted. -When data is stored to a DataFed repository, Globus is used to transfer a user-specified source file (as a Globus -path) into the repository where it becomes associated with a DataFed data record. 
Likewise, when data is retrieved +When data is stored in a DataFed repository, Globus is used to transfer a user-specified source file (as a Globus +path) into the repository, where it becomes associated with a DataFed data record. Likewise, when data is retrieved from a DataFed repository, Globus is used to transfer the raw data of a DataFed record from the repository to a user- -specified Globus destination; however, note that the raw data is simply copied - not moved - from the DataFed +specified Globus destination. Note that the raw data is copied—not moved—from the DataFed repository. The central DataFed service maintains data record tracking information and orchestrates raw data transfers, but never directly processes raw data. .. note:: - DataFed provides a universal storage allocation and fine-grained access control mechanisms to - enable users at disjoint organizations to share and access data with each other without undue burden on - local system administrators. Local administrators are able to maintain and enforce data policies - on local DataFed repositories without disrupting remote DataFed facilities or users in any way. + DataFed provides universal storage allocation and fine-grained access control mechanisms that + enable users at disjoint organizations to share and access data without undue burden on + local system administrators. Local administrators can maintain and enforce data policies + on local DataFed repositories without disrupting remote DataFed facilities or users. Continuing with the previous example, the experimental facility shown does not have a local DataFed repository and, instead, could use allocations on the DataFed repository within the compute facility (if, for example, these facilities were collaborating or were managed by the same organization). 
In this scenario, users at the experimental -facility would store and retrieve data using a DataFed allocation granted by the compute facility, but from the users +facility would store and retrieve data using a DataFed allocation granted by the compute facility, but from the users' perspective, all DataFed interactions would behave as if the repository were local. The only noticeable difference would be increased latency associated with DataFed data transfers. @@ -77,103 +76,102 @@ in the DataFed :doc:`Use Cases ` document. System Architecture =================== -The DataFed system is composed of a number of system components and interfaces that are deployed across +The DataFed system is composed of a number of system components and interfaces deployed across the DataFed network to implement scalable distributed data storage and indexing. A simplified system architecture -is shown in Figure 2, below, and shows only the central DataFed services, one DataFed data repository, and +is shown in Figure 2, below, and includes only the central DataFed services, one DataFed data repository, and supporting interfaces. -.. figure:: /_static/system_components.png +.. figure:: /_static/system_components.png :scale: 75% :align: center Figure 2 - DataFed System Components -DataFed's central services include the "Core" service which is essentially the "brains" of DataFed. The core +DataFed's central services include the "Core" service, which is essentially the "brains" of DataFed. The core service manages the metadata associated with managed raw data and also implements access control, orchestration, -and concurrency controls for data movements across the DataFed network. The core service, however, is not directly -involved in the transfer of raw data - this function is delegated to Globus services, or more specifically, to the -GridFTP servers (managed by GLobus) located at DataFed data repositories and other facilities. 
(The blue lines in +and concurrency controls for data movement across the DataFed network. The core service is not directly +involved in the transfer of raw data—this function is delegated to Globus services, specifically the +GridFTP servers (managed by Globus) located at DataFed data repositories and other facilities. (The blue lines in Figure 2 indicate high-performance raw data transfer pathways.) -The raw data storage resources within a DataFed data repository can be any form of physical storage hardware, so long -as the interface to this storage is supported by Globus. Currently this includes POSIX file systems and S3 object -stores. The inherit reliability of the physical storage of a repository is determined by the host facility and -could range from inexpensive magnetic disks to high-speed solid state drives or even archival-quality geographically -distribute storage systems. Local administrators control repository policies and determine which DataFed users can +The raw data storage resources within a DataFed data repository can be any form of physical storage hardware, as long +as the storage interface is supported by Globus. Currently this includes POSIX file systems and S3 object +stores. The inherent reliability of a repository's physical storage is determined by the host facility and +may range from inexpensive magnetic disks to high-speed solid-state drives or even archival-quality geographically +distributed storage systems. Local administrators control repository policies and determine which DataFed users can utilize a repository by granting (or revoking) repository allocations. These local administrative policies and actions -have no impact on DataFed on repositories at other facilities. +have no impact on DataFed repositories at other facilities. -Figure 2 shows a DataFed repository in isolation; however, a host facility would typically integrate their DataFed -repositories with their own local storage and compute resources. 
For example, a facility would likely have additional -Globus endpoints that would mount the primary file system(s) of the facility, and they would install high-speed +Figure 2 shows a DataFed repository in isolation; however, a host facility would typically integrate its DataFed +repositories with its own local storage and compute resources. For example, a facility would likely have additional +Globus endpoints that mount the primary file system(s) of the facility, and it would install high-speed interconnects between the DataFed repository endpoint and the facility endpoint(s) to increase data transfer speeds between the two storage systems. The web services within the DataFed central services primarily support a web portal that allows users to easily organize -and share data from a web browser; however, the web services also play a critical role in authenticating DataFed users +and share data from a web browser. These web services also play a critical role in authenticating DataFed users through Globus' federated identity system (which is based on OAuth2). New DataFed users must register through the -DataFed data portal and grant certain permission to DataFed through Globus' authorization system. These permissions +DataFed data portal and grant specific permissions to DataFed through Globus' authorization system. These permissions relate to user identification and enabling automatic data transfers on behalf of DataFed users. ---------- Interfaces ---------- -Users are able to interact with DataFed through several available interfaces including a graphical web application, +Users can interact with DataFed through several available interfaces, including a graphical web application, a command-line interface (CLI), and both high- and low-level application programming interfaces (APIs). 
The easiest -way to interact with DataFed is through the web application (see :doc:`DataFed Web Portal `), and -the web application is where users initially register for DataFed accounts. +way to interact with DataFed is through the web application (see :doc:`DataFed Web Portal `), which +is also where users initially register for DataFed accounts. -The DataFed CLI and APIs are all provided through a single Python-based DataFed client packaged and available on PyPi. Refer +The DataFed CLI and APIs are all provided through a single Python-based DataFed client package available on PyPI. Refer to the :doc:`Client Installation `, :doc:`CLI User Guide `, and :doc:`Python Scripting Guide ` for more information. -DataFed's interfaces can be used from any workstation, laptop, or compute node; however, these interfaces only provide -users with the ability to issue commands to the DataFed central service. If users need to be able to also transfer raw -data to or from a given host machine, the local file system of the host machine must be connected to a Globus endpoint. -Typically, research facilities will already provide Globus endpoints to access specific local file systems; however, for +DataFed interfaces can be used from any workstation, laptop, or compute node; however, these interfaces only provide +users with the ability to issue commands to the DataFed central service. If users also need to transfer raw +data to or from a given host machine, the local file system of that machine must be connected to a Globus endpoint. +Typically, research facilities already provide Globus endpoints to access specific local file systems; however, for individual workstations and laptops, users will need to install Globus Personal Connect. See `DataFed Client Installation ` for more information. User Accounts ============= -User must register with DataFed in order to access public or shared data records and collections; however, registration -is free and only requires a Globus account. 
(Refer to the `/system/getting_started` document for help with the +Users must register with DataFed to access public or shared data records and collections; registration +is free and requires only a Globus account. (Refer to the `/system/getting_started` document for help with the registration process.) Once registered, users are tracked internally by their Globus identity but can also be searched for -using their proper names. In order for users to be able to create their own data records, an allocation on one or more DataFed -data repositories is required. Please contact the IT department at a DataFed-enabled facility for assistance with +using their proper names. To create their own data records, users must have an allocation on one or more DataFed +data repositories. Please contact the IT department of a DataFed-enabled facility for assistance with acquiring a DataFed repository allocation. .. note:: - In a future release of DataFed, a searchable directory of available data repositories will be made available and allow - users to request allocations directly from within DataFed. + In a future release of DataFed, a searchable directory of available data repositories will allow + users to request allocations directly within DataFed. -DataFed registration utilizes a standard Globus authentication and authorization process. When you begin the registration -process from the DataFed welcome page, you will be redirected to Globus for authentication (log-in) using your Globus account. -Globus will then ask you to authorize DataFed to access your Globus identity and to allow DataFed to transfer data on your behalf. -Once this process is complete, you will be redirected to a DataFed post-registration page where you will create a DataFed password. -This password is only used when manually authenticating from the DataFed command-line interface, and it can be updated from -DataFed Web Portal at any time. 
+DataFed registration uses a standard Globus authentication and authorization process. When you begin the registration +process from the DataFed welcome page, you are redirected to Globus for authentication (login) using your Globus account. +Globus will then ask you to authorize DataFed to access your Globus identity and allow DataFed to transfer data on your behalf. +Once this process is complete, you are redirected to a DataFed post-registration page where you create a DataFed password. +This password is used only when manually authenticating from the DataFed command-line interface, and it can be updated from +the DataFed Web Portal at any time. -Note that DataFed will only initiate data transfers when you (or a process running as you) explicitly request it to. Further, -DataFed data transfers are constrained to be between DataFed data storage repositories and Globus endpoints that you have pre-authorized -(or "activated") for access. Globus end-point activation is transient and access will expire within a period determined by the +Note that DataFed will initiate data transfers only when you (or a process acting as you) explicitly request them. Further, +DataFed data transfers are restricted to transfers between DataFed data storage repositories and Globus endpoints that you have pre-authorized +(or "activated") for access. Globus endpoint activation is transient, and access expires within a period determined by the policies of the host facility. - System Concepts =============== -DataFed provides a uniform, holistic, and logical view of the data, users, and various organizational structures associated -with the federation of facilities and data storage resources that make up the DataFed network. From a users perspective, -all data operations look and feel the same from within DataFed regardless of where DataFed is being accessed, where data is -physically stored, or which DataFed interface is being utilized. 
In order to understand the features and capabilities of -DataFed, as a whole, it is necessary to understand the underlying terminology and concepts, and these are discussed in this +DataFed provides a uniform, holistic, and logical view of the data, users, and organizational structures associated +with the federation of facilities and data storage resources that make up the DataFed network. From a user's perspective, +all data operations appear consistent within DataFed, regardless of where DataFed is accessed, where data is +physically stored, or which DataFed interface is used. To fully understand DataFed’s features and capabilities, +it is necessary to understand the underlying terminology and concepts discussed in this section. -Because DataFed relies heavily upon Globus for data transfers, it is helpful to understand the basics of how Globus works and +Because DataFed relies heavily on Globus for data transfers, it is helpful to understand the basics of how Globus works and how to use it to move data between Globus endpoints. A good starting point for understanding Globus can be found `here `_. --------------- @@ -181,7 +179,7 @@ Quick Reference --------------- Below is a brief, alphabetical list of the most common DataFed terms and concepts. These topics are discussed in -greater detail in following sections of this document. +greater detail in the following sections of this document. - **Access Control** - Access controls are sets of fine-grained permissions associated with data records and/or collections that may be applied to specific users or groups of users. @@ -191,13 +189,13 @@ greater detail in following sections of this document. - **Aliases** - An alias is an optional, human-friendly alternate identifier for data records and collections. - **Allocation** - An allocation is a storage allowance on a specific DataFed repository. One or more allocations are required - in order to create DataFed data records. + to create DataFed data records. 
- **Annotation** - Annotations are a mechanism for opening and tracking issues associated with data records and collections. Depending on the severity and outcome of an issue, DataFed may propagate issues to downstream data records for further impact assessment. - **Attributes** - Attributes are searchable system-defined (fixed) metadata fields associated with certain entities (data records, collections, - etc.) within DataFed. Textual attributes of data records and collections (ie. title, description) are full-text indexed. The term + etc.) within DataFed. Textual attributes of data records and collections (e.g., title, description) are full-text indexed. The term "attributes" is used to avoid confusion with optional user-defined "metadata". - **Catalog** - The DataFed catalog is a categorized searchable index of internally "published" DataFed collections. All included @@ -206,32 +204,32 @@ greater detail in following sections of this document. - **Collection** - A collection is a logical (or virtual) folder with a unique identifier and attributes that can be used to hierarchically organize, share, and download groups of data records and/or other collections. -- **Creator** - The user that originally creates a Data Record becomes the owner (and creator) of the record and has full irrevocable access to the given record. +- **Creator** - The user that originally creates a DataFed record becomes the owner (and creator) of the record and has full irrevocable access. -- **Data Record** - A data record is the most basic unit of data within DataFed and consists of a unique identifier, attributes, +- **Data Record** - A data record is the basic unit of data within DataFed and consists of a unique identifier, attributes, and, optionally, raw data and domain-specific metadata. - **Group** - A group is a user-defined set of users for applying access controls to data records or collections. Groups are not the same as projects. 
-- **Identifier** - Identifiers are system-unique alphanumeric strings that are automatically assigned to all entities within DataFed.
+- **Identifier** - Identifiers are system-unique alphanumeric strings automatically assigned to all entities within DataFed.

-- **Metadata** - The term "metadata" refers to optional searchable user-defined (domain-specific) structured metadata associated with data
+- **Metadata** - Metadata refers to optional searchable user-defined (domain-specific) structured information associated with data
  records. Required top-level metadata is referred to as "attributes" to avoid confusion.

-- **Owner** - The user or project that originally creates a Data Record becomes the owner of the record and has full access
-  to the given record. Ownership can be transferred to another user or project.
+- **Owner** - The user or project that originally creates a DataFed record becomes the owner and retains full access.
+  Ownership can be transferred to another user or project.

-- **Project** - A DataFed project is a logical grouping of users to enable collective ownership of data and to simplify collaboration.
+- **Project** - A DataFed project is a logical grouping of users that enables collective ownership of data and simplifies collaboration.
  Projects have their own data storage allocations.

 - **Project Administrator** - A user designated by a Project Owner to have managerial access to a specified project.

-- **Project Owner** - Any user that creates a DataFed Project is the owner, with full access rights, of the project.
+- **Project Owner** - Any user who creates a DataFed project is its owner and holds full access rights for that project.

-- **Project Member** - A user designated by either a Project Owner or Administrator to have member access to a specified project.
+- **Project Member** - A user designated by a Project Owner or Administrator to have member access to a specified project.
-- **Provenance** - Provenance is a form of metadata associated with data records that captures relationships with other data records. - Provenance is maintained by DataFed using direct links between records rather than identifier references in record attributes or metadata. +- **Provenance** - Provenance is metadata associated with data records that captures relationships with other data records. + Provenance is maintained using direct links rather than identifier references in record attributes or metadata. - **Repository** - A repository is a federated storage system located at a specific facility that stores the raw data associated with DataFed data records. Users and projects may be granted allocations on repositories to enable data storage. @@ -239,39 +237,36 @@ greater detail in following sections of this document. - **Repository Administrator** - A user designated by a DataFed Administrator to have managerial access to a data repository. - **Root Collection** - The root collection is a reserved collection that acts as the parent for all other (top-level) collections - and data records. Each user and project has their own root collection. + and data records. Each user and project has its own root collection. -- **Saved Query** - A saved query is a data search expression that is stored in a query object such that it can be subsequently - run by referencing the associated query identifier. The results of saved queries are dynamic (i.e. matches from when the query - is run, rather than when it was saved). +- **Saved Query** - A saved query is a data search expression stored in a query object so that it can be rerun by referencing + the query identifier. Results of saved queries are dynamic (i.e., based on matches at execution time). -- **Shared Data** - When a user grants permission to access data records and/or collections to other users, those records and collections - become visible to the referred users as "shared data". 
+- **Shared Data** - When a user grants permissions to access data records and/or collections to other users, those records and collections + become visible as "shared data". -- **Tags** - Tags are searchable, user-defined words that may be associated with data records and collections. Tags use is tallied internally - to allow popular tags to be identified by users. +- **Tags** - Tags are searchable, user-defined words that may be associated with data records and collections. Tag usage is tallied internally + to identify popular tags. -- **Task** - Tasks are trackable background processes that run on the DataFed server for specific longer-running operations such as data +- **Task** - Tasks are trackable background processes that run on the DataFed server for longer-running operations such as data transfers and allocation changes. -- **User** - Any person with a DataFed account. Users are identified by their unique Globus ID account name, with optionally linked organizational accounts. - +- **User** - Any person with a DataFed account. Users are identified by their unique Globus ID, with optionally linked organizational accounts. ----------------------- Identifiers and Aliases ----------------------- -All system "entities" in DataFed (data, collections, user, projects, etc.) are automatically assigned system-unique identifiers (IDs) -consisting of a prefix (that determines entity type) followed by an alphanumeric value. For example, "d/12345678" would be -an ID for data record, and "c/87654321" would be a collection. The numeric portion of these IDs is not in any particular order -and can be considered essentially random, but unique for a given entity type. System IDs are not easy for humans to remember and -use, thus for data records and collections (which are referenced frequently) users may opt to assign a human-friendly "alias" -that can be used in place of the system identifier. 
- -Aliases are lowercase alphanumeric strings that can contain the letters 'a' through 'z', the numbers '0' through '9', and the -special characters '-','_', and '.'. Aliases can be considered to be the equivalent of a file or directory name in a file -system. A scoping prefix is automatically attached to aliases in order to ensure aliases are unique across all users -(and projects) in DataFed. These prefixes consist of the type of the alias owner ("u" for users, and "p" for projects), +All system "entities" in DataFed (data, collections, users, projects, etc.) are automatically assigned system-unique identifiers (IDs) +consisting of a prefix (which determines entity type) followed by an alphanumeric value. For example, "d/12345678" is +a data record identifier, and "c/87654321" is a collection identifier. The numeric portion of these IDs is not in any particular order +and can be considered essentially random but unique for a given entity type. System IDs are not easy for humans to remember and +use, so users may optionally assign human-friendly "aliases" to data records and collections. + +Aliases are lowercase alphanumeric strings that may contain the letters 'a' through 'z', the numbers '0' through '9', and the +special characters '-', '_', and '.'. Aliases can be considered the equivalent of file or directory names in a file +system. A scoping prefix is automatically attached to ensure aliases are unique across all users +and projects in DataFed. These prefixes consist of the type of the alias owner ("u" for users and "p" for projects), followed by the user or project ID, separated by colons. For example: .. code-block:: text @@ -284,12 +279,12 @@ followed by the user or project ID, separated by colons. For example: .. 
note:: - In both the DataFeb web portal and the command-line interface, scoping prefixes are not required to be entered for aliases + In both the DataFed web portal and the command-line interface, scoping prefixes do not need to be entered for aliases (nor are they displayed) except when referencing data owned by another user or project. -In general, aliases are intended to support interactive data browsing and sharing and, thus, should be easy to use and understand. -Aliases should *not* be used to encode complex parameters or other information that is more appropriately placed in a data record's -searchable metadata. This is especially true when sharing data with users that may not be familiar with an ad hoc name-based parameter +In general, aliases are intended to support interactive data browsing and sharing, and thus should be easy to use and understand. +Aliases should *not* be used to encode complex parameters or other details that are better represented in a data record’s +searchable metadata. This is especially important when sharing data with users who may not be familiar with an ad hoc name-based encoding scheme. .. note:: @@ -301,341 +296,387 @@ encoding scheme. Data Records ------------ -A data record is the basic unit of data storage within DataFed and consist of, at a minimum, an identifier and a title. A number -of additional optional informational fields can be specified including an alias, a textual description, structured metadata, +A data record is the basic unit of data storage within DataFed and consists, at a minimum, of an identifier and a title. A number +of additional optional informational fields can be specified, including an alias, a textual description, structured metadata, provenance relationships, and tags. All of these data record fields are maintained centrally within DataFed and do not count -against a users storage allocation(s). Refer to the :ref:`Field Summary`, below, for a full list of data record fields. 
+against a user's storage allocation(s). Refer to the :ref:`Field Summary`, below, for a full list of data record fields. -While metadata-only data records can be useful for specific use cases, it is likely some form of source data will need to be -associated with a given data record. This data is referred to as "raw data" because DataFed treats it as an oblique attachment to a -data record (i.e. DataFed cannot "see" inside this raw data for purposes of indexing or searching). Raw data can be any format -and any size so long as a user has sufficient allocation space to store it. See the :ref:`Raw Data` section, below, for further details. +While metadata-only data records can be useful for specific use cases, it is likely that some form of source data will need to be +associated with a given data record. This source data is referred to as "raw data" because DataFed treats it as an opaque attachment to a +data record (i.e., DataFed cannot inspect the raw data for indexing or searching). Raw data may be any format +and any size, provided the user has sufficient allocation space. See the :ref:`Raw Data` section, below, for further details. When creating a data record, a storage allocation on a DataFed repository must be available. If a user has multiple allocations, -then an allocation can be specified or the default allocation will be used instead. The default allocation can be viewed and set -in the DataFed web portal. After creation, it is possible to move a record to an allocation on a different repository, and if raw -data has been uploaded it will be relocated automatically. Similarly, data record ownership can be transferred to another DataFed -user or project, and again, raw data will be relocated. +an allocation can be specified or the default allocation will be used. The default allocation can be viewed and set +in the DataFed web portal. 
After creation, it is possible to move a record to an allocation on a different repository; if raw +data has been uploaded, it will be relocated automatically. Similarly, data record ownership can be transferred to another DataFed +user or project, and raw data will be relocated accordingly. .. note:: - If large collections of data records are moved between allocations, or to new owners, the server-side background task associated - with moving the raw data may take a significant amount of time to complete. Progress can be monitored via the web portal or the CLI. + If large collections of data records are moved between allocations or to new owners, the background server task associated + with moving raw data may take a significant amount of time. Progress can be monitored via the web portal or the CLI. Metadata -------- -The metadata of a data record is distinct from the built-in record fields such as title and description, -and is represented using Javascript Object Notation (JSON). JSON was selected because it is human-readable, can represent +The metadata of a data record is distinct from the built-in record fields such as title and description +and is represented using JavaScript Object Notation (JSON). JSON was selected because it is human-readable, can represent arbitrary structured documents, and is easily validated using JSON-based schemas (see ``_). Like other fields, metadata is searchable using the powerful built-in query language described in the :ref:`Data and Collection Search` section of this document. -When creating or updating a data record, metadata may be directly specified or a JSON file may be referenced as the metadata source. -When updating the metadata associated with an existing data record, the user has the option to either replace all of the existing -metadata or to merge new metadata with existing metadata. 
In the case of merging, any keys that are present in both the new and -existing metadata will be overwritten by the new values - other existing keys are left unchanged and new keys are inserted. +When creating or updating a data record, metadata may be specified directly or provided via a JSON file. When updating the metadata +of an existing data record, users may choose to replace all existing metadata or merge new metadata with existing metadata. In the +case of merging, any keys present in both the new and existing metadata will be overwritten by the new values, while other existing +keys remain unchanged and new keys are inserted. .. note:: - When providing metadata, it must fully comply with the JSON specification, located at ``_. + Provided metadata must fully comply with the JSON specification located at ``_. Provenance ---------- -Provenance information in DataFed is maintained as direct links between any two data records, and includes a direction -and a type. Currently three types of provenance relationships are supported, as shown in the table below. The direction of +Provenance information in DataFed is maintained as direct links between any two data records and includes both a direction +and a type. Currently, three types of provenance relationships are supported, as shown in the table below. The direction of provenance relationships is implicitly defined by setting relationship information on "dependent" data records only. +----------------------+ -|Relationship | +| Relationship | +======================+ -|Is Derived From | +| Is Derived From | +----------------------+ -|Is a Component Of | +| Is a Component Of | +----------------------+ -|Is a Newer Version Of | +| Is a Newer Version Of| +----------------------+ - -It is easy to understand provenance direction by thinking of the dependent record as the subject of the relationship statement. 
-For example, if data record "xyz" "is derived from" data record "pqr", then data record "xyz" is the dependent and the provenance +Provenance direction is easiest to understand by thinking of the dependent record as the subject of the relationship statement. +For example, if data record "xyz" "is derived from" data record "pqr", then data record "xyz" is the dependent, and the provenance relationship to record "pqr" should be set on record "xyz". Raw Data -------- -Raw data is associated with a DataFed data record by uploading a source file from a Globus endpoint, and, once uploaded, it +Raw data is associated with a DataFed data record by uploading a source file from a Globus endpoint. Once uploaded, it can be downloaded to any other Globus endpoint. Users uploading and/or downloading raw data must have appropriate -permissions both on the source/destination Globus endpoints and on the DataFed record itself. When data is uploaded to a -data record, the source path, extension, and data size is captured in the data record. When downloading, users can request -either the original filename or the record identifier be used as the name for the downloaded file. +permissions on both the source/destination Globus endpoints and the DataFed record itself. When data is uploaded to a +data record, the source path, extension, and data size are captured in the data record. When downloading, users may request +either the original filename or the record identifier as the filename. .. note:: - As with all Globus transfers, it is the users responsibility to ensure that the source or destination endpoints are activated + As with all Globus transfers, it is the user's responsibility to ensure that the source or destination endpoints are activated prior to initiating a raw data transfer in DataFed. This restriction is due to the inherent security design of Globus, which - prohibits agent processes, like DataFed, from activating endpoints on behalf of users. 
Note, however, that DataFed data + prohibits agent processes, such as DataFed, from activating endpoints on behalf of users. Note, however, that DataFed data repositories never require activation. -When a raw data transfer is initiated from DataFed, the transfer can be monitored in DataFed using the "Task ID" of the transfer -request. In the DataFed CLI and Python API, the task ID is provided in the output of the request. In the DataFed web portal, -the most recent tasks will be shown and periodically updated under the "Tasks" tab. When a transfer completes without errors, -the task status will become "SUCCESS"; otherwise an error message will be provided. Common problems include forgetting to +When a raw data transfer is initiated from DataFed, the transfer can be monitored using the "Task ID" of the transfer +request. In the DataFed CLI and Python API, the task ID is provided in the request output. In the DataFed web portal, +recent tasks are shown and periodically updated under the "Tasks" tab. When a transfer completes without errors, +the task status becomes "SUCCESS"; otherwise, an error message is provided. Common issues include forgetting to activate an endpoint, endpoint activation expiring, or referencing an invalid path or filename. Field Summary ------------- -The table below lists all of the fields of a data record. Most of these fields are searchable using simple -equality tests (i.e. == and !=); however the title and description fields are full-text indexed - enabling root-word and phrase -searches as well. When composing search expressions, the field names as shown in the third column of the table must be used. -User-specified metadata fields can be searched by prefixing the field names in the associated JSON document with "md.". 
- -=============== ======== ======== ========================================= -Field Type Name Description -=============== ======== ======== ========================================= -ID Auto id Auto-assigned system-unique identifier -Alias Optional alias Human-friendly alternative identifier -Title Required title Title of record -Description Optional desc Description of record (markdown allowed) -Tags Optional --- Tag list -Metadata Schema Optional schema Schema ID for metadata -Metadata Optional md.* User-specified JSON document -Provenance Optional --- Relationship(s) with other data records -Allocation Default --- Data repository ID of allocation used -Owner Auto owner User ID of current record owner -Creator Auto creator User ID of original record creator -Source Auto source Globus path of source raw data -Size Auto size Size of raw data, in bytes -Ext Optional ext Extension of raw data file -Create Time Auto ct Creation timestamp (Unix) -Update Time Auto ut Update timestamp (Unix) -=============== ======== ======== ========================================= +The table below lists all of the fields of a data record. Most of these fields are +searchable using simple equality tests (i.e., ``==`` and ``!=``); however, the +``title`` and ``description`` fields are full-text indexed, enabling root-word +and phrase searches as well. When composing search expressions, the field names +shown in the third column of the table must be used. + +User-specified metadata fields can be searched by prefixing the field names in +the associated JSON document with ``md.``. 
+ +================= ========= =========== ============================================ +Field Type Name Description +================= ========= =========== ============================================ +ID Auto id Auto-assigned system-unique identifier +Alias Optional alias Human-friendly alternative identifier +Title Required title Title of record +Description Optional desc Description of record (Markdown allowed) +Tags Optional --- Tag list +Metadata Schema Optional schema Schema ID for metadata +Metadata Optional md.* User-specified JSON document +Provenance Optional --- Relationship(s) with other data records +Allocation Default --- Data repository ID of allocation used +Owner Auto owner User ID of current record owner +Creator Auto creator User ID of original record creator +Source Auto source Globus path of source raw data +Size Auto size Size of raw data, in bytes +Ext Optional ext Extension of raw data file +Create Time Auto ct Creation timestamp (Unix) +Update Time Auto ut Update timestamp (Unix) +================= ========= =========== ============================================ ------------ Collections ----------- -Collections in DataFed are a logical mechanism for organizing, sharing, and downloading sets of data records. Data records -may be placed in multiple collections (as links) and child collections may be created to further organize contained -records. Like data records, collections have, at a minimum, an identifier and a title, but additional optional fields may -be defined including an alias, a description, public access, and tags. Unlike data records, collections do not support -user-specified structured metadata. - -Collections do not exclusively "own" the data records contained within them, but certain collection operations will directly -impact the records (and child collections) within them. There are also constraints on which data records can be placed in a -collection. 
These operations and constraints are as follows: - -- **Permissions** - Collections allow inheritable permissions to be set that apply to all contained data records and child - collections. This is generally the preferred way to share data other users and to control data access within a project. -- **Single Owner** - It is not currently possible to mix data records owned by multiple users in a single collection. Only data - records owned by the user that also owns the collection can be linked (this applies to project collections as well). However, this - restriction does not apply to record "creators". -- **Deletion** - If a collection is deleted, all child collections, as well as all data records that exist *only* within the deleted - collection hierarchy, will be deleted. -- **Downloads** - Downloading a collection will download all raw data associated with contained data records - including those in - child collections. Downloaded raw data will all be placed in the same user-specified destination path (without subdirectories). - The DataFed web portal will display download dialog with a selectable list of which data records to download - from the collection. -- **Allocation Change** - Collections can be used to change the repository allocations of all contained data records. Any - contained data record that is not already on a specified target allocation will be scheduled to be moved. Those that are already - on the target allocation will be ignored. Currently, this operation can only be done in the DataFed web portal. -- **Ownership Change** - Collections can be used to change the ownership of contained data records. All records are moved to - a specified target collection owned by the new owner, and the all associated raw data will be scheduled to be moved to the new - owner's default allocation. Currently, this operation can only be done in the DataFed web portal. 
+Collections in DataFed are a logical mechanism for organizing, sharing, and +downloading sets of data records. Data records may be placed in multiple +collections (as links), and child collections may be created to further organize +contained records. Like data records, collections have at minimum an identifier +and a title, but additional optional fields may be defined, including an alias, +a description, public access, and tags. + +Unlike data records, collections do not support user-specified structured +metadata. Collections do not exclusively "own" the data records contained within +them, but certain collection operations can directly impact the records (and +child collections) they contain. There are also constraints on which data +records can be placed in a collection. These operations and constraints are as +follows: + +- **Permissions** – Collections allow inheritable permissions to be set that + apply to all contained data records and child collections. This is generally + the preferred way to share data with other users and to control access within + a project. + +- **Single Owner** – It is not currently possible to mix data records owned by + multiple users in a single collection. Only data records owned by the user who + owns the collection may be linked (this applies to project collections as + well). This restriction does *not* apply to record creators. + +- **Deletion** – If a collection is deleted, all child collections, as well as + any data records that exist *only* within the deleted collection hierarchy, + will be deleted. + +- **Downloads** – Downloading a collection will download all raw data associated + with the contained data records, including those in child collections. + Downloaded raw data will be placed in the user-specified destination path + (without subdirectories). The DataFed web portal will display a download + dialog with a selectable list of data records to include. 
+ +- **Allocation Change** – Collections can be used to change the repository + allocations of all contained data records. Any record not already on the + specified target allocation will be scheduled to move. Records already on the + target allocation will be ignored. Currently, this operation can only be + performed in the DataFed web portal. + +- **Ownership Change** – Collections can be used to change the ownership of all + contained data records. All records are moved to a specified target collection + owned by the new owner, and the associated raw data will be scheduled for + transfer to the new owner's default allocation. Currently, this operation can + only be performed in the DataFed web portal. + Root Collection --------------- -All users and projects own a special "root" collection that acts as the parent for all other (top-level) collections and/or -data records. The root collections behaves like a normal collection except that it cannot be edited or deleted. The root -collection also has a special identifier and alias which are derived from the type and owner identifier, as follows: +All users and projects own a special "root" collection that acts as the parent +for all other (top-level) collections and/or data records. The root collection +behaves like a normal collection except that it cannot be edited or deleted. +The root collection also has a special identifier and alias derived from its +type and owner identifier: .. 
code-block:: text - For a user with an ID of "user123", the root collection ID is "c/u_user123_root" and the alias is "u:user123:root" + For a user with an ID of "user123": + Root collection ID: "c/u_user123_root" + Alias: "u:user123:root" + + For a project with an ID of "proj123": + Root collection ID: "c/p_proj123_root" + Alias: "p:proj123:root" - For a project with an ID of "proj123", the root collection ID is "c/p_proj123_root" and the alias is "p:proj123:root" Public Collections ------------------ -Collections can be set to public access, in which case the collection and all of its contents will become discoverable and -readable by any DataFed user. Public access is implemented through a DataFed catalog system which allows users to browse -and search for public collections and datasets. Please refer to the `Catalog`_ section for more information. +Collections can be configured for public access. When a collection is made +public, the collection and all of its contents become discoverable and readable +by any DataFed user. Public access is enabled through the DataFed catalog +system, which allows users to browse and search for public collections and +datasets. + +Please refer to the :ref:`Catalog` section for more information. Field Summary ------------- -The table below lists all of the fields of a collection. Currently, only public collections in the DataFed catalog can be -searched, and only through the DataFed web portal. In a future release, direct queries will be supported. 
- -============== ======== ======== ========================================= -Field Type Name Description -============== ======== ======== ========================================= -ID Auto id Auto-assigned system-unique identifier -Alias Optional alias Human-friendly alternative identifier -Title Required title Title of record -Description Optional desc Description of record (markdown allowed) -Tags Optional --- Tag list -Access Default --- Public or private (default) access -Category Optional --- Catalog category for public access -Owner Auto owner User ID of current collection owner -Creator Auto creator User ID of original collection creator -Create Time Auto ct Creation timestamp (Unix) -Update Time Auto ut Update timestamp (Unix) -============== ======== ======== ========================================= +The table below lists all of the fields of a collection. Currently, only public collections in the +DataFed catalog can be searched, and only through the DataFed web portal. In a future release, +direct queries will be supported. 
+ +============== ======== ======== ========================================= +Field Type Name Description +============== ======== ======== ========================================= +ID Auto id Auto-assigned system-unique identifier +Alias Optional alias Human-friendly alternative identifier +Title Required title Title of record +Description Optional desc Description of record (markdown allowed) +Tags Optional --- Tag list +Access Default --- Public or private (default) access +Category Optional --- Catalog category for public access +Owner Auto owner User ID of current collection owner +Creator Auto creator User ID of original collection creator +Create Time Auto ct Creation timestamp (Unix) +Update Time Auto ut Update timestamp (Unix) +============== ======== ======== ========================================= + --------- Projects -------- -A DataFed project is a distinct organizational unit that permits multiple users to create and manage data records and -collections as a team - without requiring project members to maintain their own complex access control rules. Projects -can be created by any DataFed user, but a DataFed repository allocation is required for the project before any data -records can be created within, or transferred to, the project. Projects have specific user roles with distinct -permissions, as follows: +A DataFed project is a distinct organizational unit that permits multiple users to create and manage +data records and collections as a team—without requiring project members to maintain complex access +control rules. Projects can be created by any DataFed user, but a DataFed repository allocation is +required before any data records can be created within, or transferred to, the project. -- **Project Owner** - The user that initially creates a project becomes the owner and has complete control over the - project and contained data and collections. The owner can add and remove project members and administrators. 
-- **Administrators** - These users have the ability to add and remove project members (but not other administrators), and - can also configure access control rules on the projects root collection. -- **Members** - These users may create and update data records and collections based on the access control rules set by - the project owner or administrators. Members always have administrative access to records they create. +Projects have specific user roles with distinct permissions: + +- **Project Owner** – The user who initially creates a project becomes the owner and has complete + control over the project and its contained data and collections. The owner can add and remove + project members and administrators. +- **Administrators** – These users can add and remove project members (but not other administrators), + and can configure access control rules on the project’s root collection. +- **Members** – These users may create and update data records and collections based on the access + control rules set by the project owner or administrators. Members always have administrative + access to records they create. + +When any user associated with a project creates a data record or collection inside a project, the +project—rather than the creating user—becomes the owner of the new record or collection. While users +still have administrative control over records and collections they create within a project, the +project’s storage allocation is used to store and manage any raw data associated with these records. -When any user associated with a project creates a data record or collection inside a project, the project, rather than -the creating user, becomes the owner of the new record or collection. While users still have administrative control over -records and collections they create within a project, the allocation of the project is used to store and manage any raw -data associated with these records. 
---------------- Access Controls --------------- -DataFed implements fine-grained access control through a set of permissions that can be applied to both data records and -collections. Permissions can be configured to apply to specific users, groups of users, or a combination of -these, and define what specific actions users can take. Collections also allow specification of inherited permissions that -are applied to items linked within it. The individual permissions are as follows: - -* READ RECORD - Allows reading basic information about a data record or collection. -* READ METADATA - Allows reading structured metadata of a data record. -* READ DATA - Allows downloading raw data from a data record. -* WRITE RECORD - Allows updating basic information of a data record or collection. -* WRITE METADATA - Allows updating structured metadata of a data record. -* WRITE DATA - Allows uploading raw data to a data record. -* LIST - Allows listing of items linked within a collection (does not imply reading these items) -* LINK - Allows linking an unlinking items to/from a collection -* CREATE - Allows new items to be created within a collection -* DELETE - Allows deletion of records and collections -* SHARE - Allows setting access controls on records and collections -* LOCK - Allows locking a record or collection to temporarily suppress all permissions - -Multiple user- and group- scoped permissions may be applied. Permissions for a given user are evaluated by combining all -permissions set for all scopes that apply - including permissions that may be inherited from parent collection hierarchies. -Because permissions are inherited and additive, the absence of a permission on a given data record or collection is not -equivalent to denying that permission. - -Access controls are typically applied to parent collections of a collection hierarchy where contained data and sub-collections -inherit the permissions defined by the top-level parent. 
Collections have both "local" and "inherited" permissions; where -local permissions control access to the collection record itself, and "inherited" permissions are the permissions passed -down to all contained data records and sub-collections. Note that because data records can be placed into multiple collections, -the inherited permissions of *all* associated parent collections are evaluated for each user accessing a given data record. +DataFed implements fine-grained access control through a set of permissions that can be applied to +both data records and collections. Permissions can be configured for specific users, groups, or a +combination of both, and define what actions users may take. Collections also allow the specification +of inherited permissions that apply to items linked within them. + +The individual permissions are: + +* **READ RECORD** – Allows reading basic information about a data record or collection. +* **READ METADATA** – Allows reading structured metadata of a data record. +* **READ DATA** – Allows downloading raw data from a data record. +* **WRITE RECORD** – Allows updating basic information of a data record or collection. +* **WRITE METADATA** – Allows updating structured metadata of a data record. +* **WRITE DATA** – Allows uploading raw data to a data record. +* **LIST** – Allows listing items linked within a collection (does not imply read access). +* **LINK** – Allows linking and unlinking items to/from a collection. +* **CREATE** – Allows new items to be created within a collection. +* **DELETE** – Allows deletion of records and collections. +* **SHARE** – Allows setting access controls on records and collections. +* **LOCK** – Allows locking a record or collection to temporarily suppress all permissions. + +Multiple user- and group-scoped permissions may be applied. 
Permissions for a given user are evaluated +by combining all permissions set across all applicable scopes, including inherited permissions from +parent collection hierarchies. Because permissions are inherited and additive, the absence of a +permission on a given record or collection is not equivalent to denying that permission. + +Access controls are typically applied to parent collections in a collection hierarchy, where contained +data and sub-collections inherit the permissions defined by the top-level parent. Collections have +both *local* and *inherited* permissions: local permissions control access to the collection record +itself, while inherited permissions are propagated to all contained records and sub-collections. Because +data records may be placed into multiple collections, the inherited permissions of *all* parent +collections are evaluated for each user. + ----------------------- Repository Allocations ---------------------- -Having access to DataFed does not, in itself, grant users the ability to create or manage data within DataFed. This is because -DataFed does not provide any raw data storage on its own, but instead relies on *federated* storage provided by DataFed member -organization and/or facilities. Federated storage is implemented through a network of geographically distributed "data repositories" -that are owned and maintained by specific member organizations, yet are potentially accessible by any DataFed user. +Having access to DataFed does not, by itself, grant users the ability to create or manage data within +DataFed. This is because DataFed does not provide raw data storage on its own, but instead relies on +*federated* storage provided by DataFed member organizations and facilities. -Typically, DataFed users with an account at one or more DataFed member facilities will be automatically granted storage -allocations on data repositories managed by the organization that operates the facilities. 
For unaffiliated users, storage -allocations may be explicitly requested from a DataFed member organizations. DataFed member organizations are free to define -and enforce their own data storage policies; therefore, users wishing to acquire storage a specific allocation must contact the -associated organization for information on how to gain access. Even though unaffiliated users with no storage allocation cannot -use DataFed to create and manage their own data, DataFed still allows these users to locate, access, and monitor data owned -by other DataFed users or projects. +Federated storage is implemented through a network of geographically distributed “data repositories” +that are owned and maintained by member organizations, yet may be accessible to any DataFed user. +Typically, DataFed users with accounts at one or more DataFed member facilities are automatically +granted storage allocations on repositories managed by those organizations. For unaffiliated users, +storage allocations may be explicitly requested from DataFed member organizations. + +DataFed member organizations are free to define and enforce their own data storage policies; +therefore, users requesting a storage allocation must contact the associated organization for access +information. + +Unaffiliated users without storage allocations cannot create or manage their own data, but can still +locate, access, and monitor data owned by other users or projects. + +Users may have multiple storage allocations across different repositories. In this case, a default +allocation may be specified, or a specific allocation chosen when creating new DataFed data records. +Data can be accessed uniformly regardless of which repository stores it; however, physical proximity +to the repository may affect access latency. -It is likely that DataFed users may have multiple storage allocations on different data repositories. 
In this case, a default -storage allocation may be specified, or a specific storage allocation selected when creating new DataFed data records. Data can -be accessed in a consistent manner no matter which data repository it is stored on; however, the physical proximity of a data -repository in relation to the point of use of data can impact access latency. ----------------- Metadata Schemas ---------------- -Metadata schemas are documents that define and validate the allowed fields and field types of the domain-specific metadata -associated with data records. Schemas can be used to constrain the values of fields, including min/max values, numeric -ranges, or text patterns, and can also define conditional constraints. Existing schemas can be referenced by new schemas as -a sub-document, or as a custom type for a local field. When a defined schema is associated with a data record, the domain- -specific metadata of that record is validated against the schema, and if any errors are found, the data record is flagged -and the validation errors are stored with the data record for subsequent review. Optionally, a flag can be used in the -DataFed CLI/API to reject data record create or update requests if the associated metadata does not validate against a -specified schema. - -In DataFed, the metadata schema implementation is based on a modified version of the JSON Schema Specification, version -'2020-12', available at ``_. The primary difference between DataFed's schema implementation and the -standard is how schemas are identified and referenced. With the official JSON schema specification, schemas are both identified -and accessed via URIs. This approach allows arbitrary storage, distribution, and reuse of schemas; however, it also introduces -significant latency and resource costs within DataFed services. For this reason, DataFed instead stores all schemas locally -and restricts schema references to local schemas only. 
However, external schemas can still be imported into DataFed and can -then be referenced with a local identifier. +Metadata schemas define and validate the allowed fields and field types of domain-specific metadata +associated with data records. Schemas can constrain field values (e.g., ranges, patterns) and also +define conditional constraints. Existing schemas can be referenced by new schemas as sub-documents or +as custom types for local fields. + +When a schema is associated with a data record, the domain-specific metadata is validated against that +schema. If validation errors occur, the record is flagged and the validation errors are stored for +later review. Optionally, a flag in the DataFed CLI/API can be used to reject create or update +operations when metadata does not validate successfully. + +DataFed’s schema implementation is based on a modified version of the JSON Schema Specification +(2020-12). The main difference is that DataFed identifies and references schemas locally instead of +via URIs to avoid external fetch latency. External schemas may still be imported and referenced using +local identifiers. + ----- Tags ---- -Tags are simple words that can be associated with data records and collections. Tags have no inherent meaning in themselves, -but can be useful for organizing data in a faceted manner, via saved queries, rather than a hierarchical manner through collections. -Tags are tracked and reference counted, and the web portal features an autocomplete tag entry widget that shows matching available -tags with current reference counts. +Tags are simple words associated with data records and collections. Tags have no inherent meaning but +are useful for organizing data in a faceted (rather than hierarchical) manner using saved queries. +Tags are tracked and reference-counted, and the web portal provides an autocomplete widget that shows +matching tags with their usage counts. 
+ ------------ Annotations ----------- -Annotations are a feature that allows users (with proper permissions) to attach general notifications, questions, warnings, and errors to -data records. Annotations have several states including "open", "active", and "closed". When an annotation is initially created, -it is in the "open" state by default and only the owner/creator of the data record and the author of the annotation will be able see -the new annotation. A mechanism is provided to allow the two parties to exchange information, and if deemed suitable by the owner -of the data record, the annotation can be "activated" which will make it visible to all users that have access to the associated +Annotations allow users (with appropriate permissions) to attach notifications, questions, warnings, +and errors to data records. Annotations have states including *open*, *active*, and *closed*. + +When created, an annotation is *open* by default and visible only to the data record’s owner/creator +and the annotation author. These parties may exchange information, and if the record owner deems the +annotation appropriate, it may be *activated*, making it visible to all users with access to the record. -In addition, if a record has dependent records (via provenance references) and an error or warning annotation is activated, -then the dependent records will have new annotations automatically created with links to the parent annotation. The owners of the -dependent records will then have an opportunity to perform an impact assessment and either close or activate the derived annotations. -This process continues down the provenance relationships as each derived annotation is activated. This mechanism enables a form of -data "quality assurance" even when the data producers and data consumers are unknown to one other. 
+If a record has dependent records (via provenance references) and a warning or error annotation is +activated, new derived annotations are automatically created on the dependent records. Owners of those +dependent records may perform an impact assessment and either close or activate the derived +annotations. This process continues down provenance chains. This mechanism enables a form of +distributed data quality assurance even between data producers and data consumers who may not know +each other. + +A future release will notify users via email when annotations on owned or derived records are created +or updated. -In a future release, users will be notified via email when annotations associated with owned or derived records are created or updated. --------------------------- Data and Collection Search -------------------------- -DataFed provides a powerful search feature that allows data records and collections to be found within a users personal data space, -across projects, and data/collections shared by other users and/or projects. Searches can be saved and will then be accessible -via the "Saved Queries" feature in the DataFed web portal, the command-line interface, and the Python API. Below is a list of fields -that can be used for searches and saved queries. +DataFed provides a powerful search feature that allows users to locate data records and collections +within their personal space, across projects, and among shared data. Searches can be saved and accessed +via the DataFed web portal, command-line interface, and Python API. 
+ +Searchable fields include: + +* **ID/Alias** – Full or partial ID or alias with wildcard support +* **Text** – Words/phrases within titles and/or descriptions (full-text indexed) +* **Tags** – Assigned tags +* **Date/Time** – “From” and “To” ranges based on record update timestamp +* **Creator** – Creator’s user ID +* **Metadata Schema** – Metadata schema ID +* **Metadata Query** – Domain-specific metadata query expression (schema-aware builder provided) +* **Metadata Errors** – Records with metadata schema validation errors -* ID/Alias - A full or partial ID or alias with wildcard support -* Text - Word/phrases within title and/or description (full-text indexed) -* Tags - Assigned tags -* Date / Time - "From" and "To" date ranges based on record update timestamp -* Creator - Creators user ID -* Metadata Schema - Metadata schema ID -* Metadata Query - Domain-specific metadata query expression (schema-aware query builder provided) -* Metadata Errors - Finds records with metadata schema validation errors -------- Catalog ------- -The DataFed catalog allows collections and data records to be internally published (without DOI numbers) for use by any -DataFed user. The catalog allows users to browse collections by hierarchical categories and to search for collections and datasets -directly by filtering relevant field and metadata schema values. +The DataFed catalog allows collections and data records to be internally published (without DOI +assignment) for discovery by any DataFed user. The catalog enables browsing by hierarchical categories +and direct searching of collections and datasets using field and metadata schema filters. 
+ diff --git a/docker/Dockerfile.dependencies b/docker/Dockerfile.dependencies deleted file mode 100644 index 27057501a..000000000 --- a/docker/Dockerfile.dependencies +++ /dev/null @@ -1,70 +0,0 @@ -ARG DATAFED_DIR="/datafed" -ARG DATAFED_INSTALL_PATH="/opt/datafed" -ARG DATAFED_DEPENDENCIES_INSTALL_PATH="/opt/datafed/dependencies" -ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" -ARG BUILD_DIR="$DATAFED_DIR/source" -ARG LIB_DIR="/opt/datafed/dependencies/lib" -ARG BASE_IMAGE="debian:bookworm-slim" - -FROM ${BASE_IMAGE} AS base - -SHELL ["/bin/bash", "-c"] -ARG DATAFED_DIR -ARG BUILD_DIR -ARG DATAFED_INSTALL_PATH -ARG DEBIAN_FRONTEND=noninteractive -ARG LIB_DIR -ARG DATAFED_DEPENDENCIES_INSTALL_PATH - -ENV BUILD_DIR="${BUILD_DIR}" -ENV DATAFED_DIR="${DATAFED_DIR}" -ENV DATAFED_DEPENDENCIES_INSTALL_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}" -ENV LIB_DIR="${LIB_DIR}" - -RUN mkdir -p ${BUILD_DIR} -RUN mkdir -p ${BUILD_DIR}/logs -RUN mkdir -p ${BUILD_DIR}/repository/server -RUN mkdir -p ${BUILD_DIR}/common/proto -RUN mkdir -p /libraries - -WORKDIR ${BUILD_DIR} - -# Copy install scripts -COPY ./scripts/dependency_install_functions.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/install_dependencies.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/utils.sh ${BUILD_DIR}/scripts/utils.sh -COPY ./scripts/install_docs_dependencies.sh ${BUILD_DIR}/scripts/ -COPY ./cmake/Web.cmake ${BUILD_DIR}/cmake/Web.cmake -COPY ./cmake/Version.cmake ${BUILD_DIR}/cmake/Version.cmake -COPY ./web/package.json.in ${BUILD_DIR}/web/package.json.in -COPY ./web/version.js.in ${BUILD_DIR}/web/version.js.in -COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/generate_datafed.sh - -RUN echo "#!/bin/bash\n\$@" > /usr/bin/sudo && chmod +x /usr/bin/sudo - -# run build scripts -RUN ${BUILD_DIR}/scripts/generate_datafed.sh - -# Web dependencies -COPY ./scripts/install_ws_dependencies.sh ${BUILD_DIR}/scripts/ -RUN 
DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC ${BUILD_DIR}/scripts/install_dependencies.sh -a -r -z -c - -# Core dependencies -COPY ./scripts/install_core_dependencies.sh ${BUILD_DIR}/scripts/ -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC ${BUILD_DIR}/scripts/install_dependencies.sh -a -r -z -w - -# Repo dependencies -COPY ./scripts/install_repo_dependencies.sh ${BUILD_DIR}/scripts/ -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC ${BUILD_DIR}/scripts/install_dependencies.sh -a -z -w -c - -# Authz dependencies -COPY ./scripts/install_authz_dependencies.sh ${BUILD_DIR}/scripts/ -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC ${BUILD_DIR}/scripts/install_dependencies.sh -a -w -c -r - -RUN mkdir -p ${DATAFED_INSTALL_PATH} -RUN mkdir -p ${DATAFED_INSTALL_PATH}/keys - -WORKDIR ${BUILD_DIR} - -COPY ./common ${BUILD_DIR}/common diff --git a/docker/Dockerfile.foxx b/docker/Dockerfile.foxx index 6ee7a0f9d..a94405282 100644 --- a/docker/Dockerfile.foxx +++ b/docker/Dockerfile.foxx @@ -8,6 +8,7 @@ ARG DATAFED_DIR="/datafed" ARG DATAFED_INSTALL_PATH="/opt/datafed" ARG DATAFED_DEPENDENCIES_INSTALL_PATH="/opt/datafed/dependencies" ARG BUILD_DIR="$DATAFED_DIR/source" +ARG DATAFED_DEPENDENCIES_ROOT="$BUILD_DIR/external/DataFedDependencies" ARG NVM_DIR="$DATAFED_DIR/.nvm" ARG NVM_INC="$DATAFED_DIR/.nvm/versions/node/v20.18.2/include/node" ARG NVM_BIN="$DATAFED_DIR/.nvm/versions/node/v20.18.2/bin" @@ -19,6 +20,7 @@ ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT # WARNING # @@ -30,14 +32,20 @@ ENV BUILD_DIR="${BUILD_DIR}" ENV HOME="${BUILD_DIR}" ENV DATAFED_DIR="${DATAFED_DIR}" ENV DATAFED_DEPENDENCIES_INSTALL_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}" +ENV DATAFED_DEPENDENCIES_ROOT="${DATAFED_DEPENDENCIES_ROOT}" ENV DATAFED_INSTALL_PATH="$DATAFED_INSTALL_PATH" ENV DATAFED_DEFAULT_LOG_PATH="$DATAFED_INSTALL_PATH/logs" # Set to false by default to avoid wiping the database ENV ENABLE_FOXX_TESTS="FALSE" +RUN mkdir 
-p ${BUILD_DIR}/external/DataFedDependencies/scripts/ && \ + mv ./scripts/dependency_versions.sh ${BUILD_DIR}/external/DataFedDependencies/scripts/ && \ + mv ./scripts/dependency_install_functions.sh ${BUILD_DIR}/external/DataFedDependencies/scripts/ && \ + mv ./scripts/generate_dependencies_config.sh ${BUILD_DIR}/external/DataFedDependencies/scripts/ && \ + mv ./scripts/utils.sh ${BUILD_DIR}/external/DataFedDependencies/scripts/ + COPY ./core/CMakeLists.txt ${BUILD_DIR}/core/CMakeLists.txt COPY ./CMakeLists.txt ${BUILD_DIR} -COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ COPY ./cmake ${BUILD_DIR}/cmake COPY ./docker/entrypoint_foxx.sh ${BUILD_DIR}/docker/entrypoint_foxx.sh @@ -72,14 +80,15 @@ RUN chown -R datafed:root /home/datafed \ ${BUILD_DIR}/scripts \ ${BUILD_DIR}/core/ \ ${BUILD_DIR}/common/ \ - ${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm && \ + ${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm \ + ${BUILD_DIR}/external/DataFedDependencies/ && \ chown datafed:root ${BUILD_DIR} ${DATAFED_DEPENDENCIES_INSTALL_PATH} && \ find ${BUILD_DIR}/config -maxdepth 1 -type f -exec chmod 0664 {} + && \ find ${BUILD_DIR}/core/database -type f -exec chmod 0664 {} + && \ find ${BUILD_DIR}/core -type d -exec chmod 0774 {} + && \ - find ${BUILD_DIR}/common/proto/common -type d -exec chmod 0774 {} + && \ - find ${BUILD_DIR}/common -type f -exec chmod 0664 {} + && \ + find ${BUILD_DIR}/config -type d -exec chmod 0774 {} + && \ find ${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm -type d -exec chmod 0774 {} + && \ + find ${DATAFED_DEPENDENCIES_ROOT} -type d -exec chmod 0774 {} + && \ chmod 774 ${BUILD_DIR} \ ${BUILD_DIR}/scripts \ ${DATAFED_DEPENDENCIES_INSTALL_PATH} \ diff --git a/docker/entrypoint_foxx.sh b/docker/entrypoint_foxx.sh index ce612c9af..81511ba2e 100755 --- a/docker/entrypoint_foxx.sh +++ b/docker/entrypoint_foxx.sh @@ -31,10 +31,6 @@ if [ -f "${PROJECT_ROOT}/core/database/foxx/manifest.json" ]; then rm 
"${PROJECT_ROOT}/core/database/foxx/manifest.json" fi -if [ -f "${PROJECT_ROOT}/common/proto/common/Version.proto" ]; then - rm "${PROJECT_ROOT}/common/proto/common/Version.proto" -fi - install_flag="/tmp/.foxx_is_installed" if [ "${local_DATAFED_FORCE_INSTALL_FOXX}" == "TRUE" ]; then if [ -f "$install_flag" ]; then @@ -46,7 +42,7 @@ fi # we may want to keep the existing state and not overwrite the database. install_flag="/tmp/.foxx_is_installed" if [ ! -f "$install_flag" ]; then - echo "Installing foxx." + echo "INFO - Installing foxx." log_path="$DATAFED_DEFAULT_LOG_PATH" if [ ! -d "${log_path}" ]; then mkdir -p "${log_path}" datafed @@ -71,6 +67,7 @@ if [ ! -f "$install_flag" ]; then -DBUILD_PYTHON_CLIENT=False -DBUILD_FOXX=True -DINSTALL_FOXX=True + -DENABLE_INTEGRATION_TESTS=False ) # Add the ENABLE_FOXX_TESTS option if it's set to TRUE @@ -84,9 +81,6 @@ if [ ! -f "$install_flag" ]; then "${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake" --build build - # Give arango container a minute to initialize - # should be replaced with health check at some point - sleep 5 "${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake" --build build --target install if [ "$ENABLE_FOXX_TESTS" == "TRUE" ]; then @@ -100,7 +94,7 @@ if [ ! -f "$install_flag" ]; then # Create flag to indicate container has done its job touch "$install_flag" else - echo "$install_flag has been found skipping reinstall" + echo "INFO - $install_flag has been found skipping reinstall" fi # Keep container alive for a little bit, the CI pipelines check that the @@ -108,3 +102,4 @@ fi # might fail because it wasn't able to determine if the container actually # ran. sleep 60 +echo "INFO - Initialization of Database has completed successfully." 
diff --git a/docs/_sources/admin/general.rst.txt b/docs/_sources/admin/general.rst.txt index 4003f9ee3..20eca8ba8 100644 --- a/docs/_sources/admin/general.rst.txt +++ b/docs/_sources/admin/general.rst.txt @@ -74,9 +74,9 @@ The npm packages needed primarily by the web server are: This can be done with a helper scripts these scripts are for ubuntu:: - ./DataFed/scripts/install_core_dependencies.sh - ./DataFed/scripts/install_repo_dependencies.sh - ./DataFed/scripts/install_ws_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_core_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_repo_dependencies.sh + ./DataFed/external/DataFedDependencies/scripts/install_ws_dependencies.sh The next step is to enter configuration options that are listed in ./config/datafed.sh. To generate a template for this file you will first need to run:: @@ -89,7 +89,6 @@ of the configuration options: 1. DATAFED_DEFAULT_LOG_PATH - Needed by core, repo, web services 2. DATAFED_DATABASE_PASSWORD - Needed by core 3. DATAFED_ZEROMQ_SESSION_SECRET - Needed by web server -4. DATAFED_ZEROMQ_SYSTEM_SECRET - Needed by web server 5. DATAFED_LEGO_EMAIL - Needed by web server 6. DATAFED_WEB_KEY_PATH - Needed by web server 7. DATAFED_WEB_CERT_PATH - Needed by web server diff --git a/docs/admin/general.html b/docs/admin/general.html index ef58285ea..9e5987089 100644 --- a/docs/admin/general.html +++ b/docs/admin/general.html @@ -178,9 +178,9 @@

Downloading DataFed & Installing Dependencies

  • client-oauth2

  • This can be done with a helper scripts these scripts are for ubuntu:

    -
    ./DataFed/scripts/install_core_dependencies.sh
    -./DataFed/scripts/install_repo_dependencies.sh
    -./DataFed/scripts/install_ws_dependencies.sh
    +
    ./DataFed/external/DataFedDependencies/scripts/install_core_dependencies.sh
    +./DataFed/external/DataFedDependencies/scripts/install_repo_dependencies.sh
    +./DataFed/external/DataFedDependencies/scripts/install_ws_dependencies.sh
     

    The next step is to enter configuration options that are listed in ./config/datafed.sh. To @@ -194,7 +194,6 @@

    Downloading DataFed & Installing Dependencies

  • DATAFED_DEFAULT_LOG_PATH - Needed by core, repo, web services

  • DATAFED_DATABASE_PASSWORD - Needed by core

  • DATAFED_ZEROMQ_SESSION_SECRET - Needed by web server

  • -
  • DATAFED_ZEROMQ_SYSTEM_SECRET - Needed by web server

  • DATAFED_LEGO_EMAIL - Needed by web server

  • DATAFED_WEB_KEY_PATH - Needed by web server

  • DATAFED_WEB_CERT_PATH - Needed by web server

  • diff --git a/external/DataFedDependencies b/external/DataFedDependencies new file mode 160000 index 000000000..fe59a393f --- /dev/null +++ b/external/DataFedDependencies @@ -0,0 +1 @@ +Subproject commit fe59a393f54d3aa1b8bf551f97d274b762bf93d2 diff --git a/external/protobuf b/external/protobuf deleted file mode 160000 index a9b006bdd..000000000 --- a/external/protobuf +++ /dev/null @@ -1 +0,0 @@ -Subproject commit a9b006bddd52e289029f16aa77b77e8e0033d9ee diff --git a/python/datafed_pkg/requirements.txt b/python/datafed_pkg/requirements.txt deleted file mode 100644 index ab83b039c..000000000 --- a/python/datafed_pkg/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -protobuf>=5.27.1 -pyzmq>=16 -wget>=3 -requests>=2 -click>=7 -prompt_toolkit>=2 diff --git a/python/docker/Dockerfile.python-client-base.ubuntu b/python/docker/Dockerfile.python-client-base.ubuntu index 53102ac43..be84235cd 100644 --- a/python/docker/Dockerfile.python-client-base.ubuntu +++ b/python/docker/Dockerfile.python-client-base.ubuntu @@ -14,7 +14,7 @@ COPY ./scripts/dependency_install_functions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ COPY ./scripts/utils.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/install_python_client_dependencies.sh ${BUILD_DIR}/scripts/ +COPY ./external/DataFedDependencies/scripts/install_python_client_dependencies.sh ${BUILD_DIR}/scripts/ RUN echo "#!/bin/bash\n\$@" > /usr/bin/sudo && chmod +x /usr/bin/sudo RUN ${BUILD_DIR}/scripts/generate_datafed.sh diff --git a/repository/docker/000-default.conf b/repository/docker/000-default.conf deleted file mode 100644 index e3755b444..000000000 --- a/repository/docker/000-default.conf +++ /dev/null @@ -1,60 +0,0 @@ - - # The ServerName directive sets the request scheme, hostname and port that - # the server uses to identify itself. This is used when creating - # redirection URLs. 
In the context of virtual hosts, the ServerName - # specifies what hostname must appear in the request's Host: header to - # match this virtual host. For the default virtual host (this file) this - # value is not decisive as it is used as a last resort host regardless. - # However, you must set it for any further virtual host explicitly. - #ServerName www.example.com - - ServerAdmin webmaster@localhost - DocumentRoot /var/www/html - - # Available loglevels: trace8, ..., trace1, debug, info, notice, warn, - # error, crit, alert, emerg. - # It is also possible to configure the loglevel for particular - # modules, e.g. - #LogLevel info ssl:warn - - ErrorLog ${APACHE_LOG_DIR}/error.log - CustomLog ${APACHE_LOG_DIR}/access.log combined - - # For most configuration files from conf-available/, which are - # enabled or disabled at a global level, it is possible to - # include a line for only one particular virtual host. For example the - # following line enables the CGI configuration for this host only - # after it has been globally disabled with "a2disconf". 
- #Include conf-available/serve-cgi-bin.conf - - -# vim: syntax=apache ts=4 sw=4 sts=4 sr noet - - ServerName datafed-gcs-test.ornl.gov - - SSLEngine on - SSLCertificateFile /opt/datafed/keys/cert.crt - SSLCertificateKeyFile /opt/datafed/keys/cert.key - - # SSL configuration - SSLProtocol TLSv1.2 TLSv1.3 - SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH - SSLHonorCipherOrder on - - # Proxy settings - ProxyPass / https://localhost:8080/ - ProxyPassReverse / https://localhost:8080/ - ProxyPreserveHost On - RequestHeader set X-Forwarded-Proto "https" - - # Additional proxy SSL settings - SSLProxyEngine on - SSLProxyVerify none - SSLProxyCheckPeerCN off - SSLProxyCheckPeerName off - SSLProxyCheckPeerExpire off - - SSLProxyVerifyDepth 2 - SSLProxyCACertificateFile /opt/datafed/keys/cert.crt - - diff --git a/repository/docker/Dockerfile b/repository/docker/Dockerfile index bf957d41b..35e869c1a 100644 --- a/repository/docker/Dockerfile +++ b/repository/docker/Dockerfile @@ -2,15 +2,16 @@ # cd ${PROJECT_ROOT} or cd DataFed # docker build -f repository/docker/Dockerfile . 
-ARG BUILD_BASE="debian:bookworm-slim" -ARG DEPENDENCIES="dependencies" -ARG RUNTIME="runtime" -ARG DATAFED_DIR="/datafed" -ARG DATAFED_INSTALL_PATH="/opt/datafed" +ARG BUILD_BASE="debian:bookworm-slim" +ARG DEPENDENCIES="dependencies" +ARG RUNTIME="runtime" +ARG DATAFED_DIR="/datafed" +ARG DATAFED_INSTALL_PATH="/opt/datafed" ARG DATAFED_DEPENDENCIES_INSTALL_PATH="/opt/datafed/dependencies" -ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" -ARG BUILD_DIR="$DATAFED_DIR/source" -ARG LIB_DIR="/usr/local/lib" +ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" +ARG BUILD_DIR="$DATAFED_DIR/source" +ARG LIB_DIR="/usr/local/lib" +ARG DATAFED_DEPENDENCIES_ROOT="$BUILD_DIR/external/DataFedDependencies" FROM ${DEPENDENCIES} AS repo-build @@ -18,34 +19,42 @@ SHELL ["/bin/bash", "-c"] ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ARG DATAFED_DEPENDENCIES_INSTALL_PATH ENV DATAFED_INSTALL_PATH="$DATAFED_INSTALL_PATH" # This port is needed to communicate with the DataFed core server EXPOSE 7512 # Not quite sure what 9000 is doing that 7512 isn't, difference between egress + # and ingress? 
EXPOSE 9000 +RUN mkdir -p ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/dependency_versions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/dependency_install_functions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/generate_dependencies_config.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ + +COPY ./common ${BUILD_DIR}/common COPY ./repository/CMakeLists.txt ${BUILD_DIR}/repository/CMakeLists.txt COPY ./CMakeLists.txt ${BUILD_DIR} -COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/dependency_install_functions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_repo_config.sh ${BUILD_DIR}/scripts/ COPY ./scripts/install_repo.sh ${BUILD_DIR}/scripts/ COPY ./cmake ${BUILD_DIR}/cmake COPY ./repository/server ${BUILD_DIR}/repository/server -RUN ${BUILD_DIR}/scripts/generate_datafed.sh && \ - ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. -B build \ - -DBUILD_REPO_SERVER=True \ - -DBUILD_AUTHZ=False \ - -DBUILD_CORE_SERVER=False \ - -DBUILD_WEB_SERVER=False \ - -DBUILD_DOCS=False \ - -DBUILD_PYTHON_CLIENT=False \ - -DBUILD_FOXX=False +RUN ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh && \ + ${BUILD_DIR}/scripts/generate_datafed.sh && \ + ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. 
-B build \ + -DBUILD_REPO_SERVER=True \ + -DBUILD_AUTHZ=False \ + -DBUILD_CORE_SERVER=False \ + -DBUILD_WEB_SERVER=False \ + -DBUILD_DOCS=False \ + -DBUILD_PYTHON_CLIENT=False \ + -DBUILD_FOXX=False \ + -DENABLE_INTEGRATION_TESTS=False RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build -j 8 RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build --target install @@ -57,19 +66,22 @@ ARG DATAFED_INSTALL_PATH ARG BUILD_DIR ARG LIB_DIR ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT # The above should also be available at runtime ENV DATAFED_INSTALL_PATH="$DATAFED_INSTALL_PATH" ENV DATAFED_DEPENDENCIES_INSTALL_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}" ENV DATAFED_GCS_COLLECTION_BASE_PATH="/mnt/datafed" ENV DATAFED_GCS_COLLECTION_ROOT_PATH="/mnt/datafed" +ENV DATAFED_DEPENDENCIES_ROOT="$DATAFED_DEPENDENCIES_ROOT" ENV DATAFED_REPO_ID_AND_DIR="home" ENV DATAFED_DIR="$DATAFED_DIR" ENV BUILD_DIR="$BUILD_DIR" ENV LIB_DIR="$LIB_DIR" -WORKDIR /datafed +WORKDIR ${BUILD_DIR} +COPY --from=repo-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts ${DATAFED_DEPENDENCIES_ROOT}/scripts COPY --from=repo-build /usr/lib/x86_64-linux-gnu/libboost_program_options.so /usr/lib/x86_64-linux-gnu/libboost_program_options.so COPY --from=repo-build /usr/lib/x86_64-linux-gnu/libboost_filesystem.so /usr/lib/x86_64-linux-gnu/libboost_filesystem.so @@ -77,16 +89,17 @@ RUN ldconfig USER datafed -COPY --chown=datafed:root ./scripts/generate_datafed.sh ${DATAFED_DIR}/scripts/generate_datafed.sh -COPY --chown=datafed:root ./scripts/generate_repo_config.sh ${DATAFED_DIR}/scripts/generate_repo_config.sh -COPY --chown=datafed:root ./scripts/install_repo.sh ${DATAFED_DIR}/scripts/install_repo.sh -COPY --chown=datafed:root ./cmake/Version.cmake ${DATAFED_DIR}/cmake/Version.cmake -COPY --chown=datafed:root ./repository/docker/entrypoint_repo.sh ${BUILD_DIR}/repository/entrypoint.sh +COPY --chown=datafed:root ./scripts/generate_datafed.sh 
${BUILD_DIR}/scripts/generate_datafed.sh +COPY --chown=datafed:root ./scripts/generate_repo_config.sh ${BUILD_DIR}/scripts/generate_repo_config.sh +COPY --chown=datafed:root ./scripts/install_repo.sh ${BUILD_DIR}/scripts/install_repo.sh +COPY --chown=datafed:root ./cmake/Version.cmake ${BUILD_DIR}/cmake/Version.cmake +COPY --chown=datafed:root ./repository/docker/entrypoint_repo.sh ${BUILD_DIR}/repository/docker/entrypoint.sh COPY --from=repo-build --chown=datafed:root ${DATAFED_INSTALL_PATH}/repo/datafed-repo ${DATAFED_INSTALL_PATH}/repo/datafed-repo USER root -RUN chown -R datafed:root /datafed -RUN chown -R datafed:root /opt +RUN chown -R datafed:root /datafed /opt \ + ${DATAFED_DEPENDENCIES_ROOT} && \ + find ${DATAFED_DEPENDENCIES_ROOT} -type d -exec chmod 0774 {} + -ENTRYPOINT [ "/datafed/source/repository/entrypoint.sh", "/opt/datafed/repo/datafed-repo","--cfg","/opt/datafed/repo/datafed-repo.cfg"] +ENTRYPOINT [ "/datafed/source/repository/docker/entrypoint.sh", "/opt/datafed/repo/datafed-repo","--cfg","/opt/datafed/repo/datafed-repo.cfg"] diff --git a/repository/docker/Dockerfile.gcs b/repository/docker/Dockerfile.gcs index 32e1e376b..c92068629 100644 --- a/repository/docker/Dockerfile.gcs +++ b/repository/docker/Dockerfile.gcs @@ -11,6 +11,7 @@ ARG NVM_DIR="$DATAFED_DIR/.nvm" ARG NVM_INC="$DATAFED_DIR/.nvm/versions/node/v13.14.0/include/node" ARG NVM_BIN="$DATAFED_DIR/.nvm/versions/node/v13.14.0/bin" ARG LIB_DIR="/usr/local/lib" +ARG DATAFED_DEPENDENCIES_ROOT="$BUILD_DIR/external/DataFedDependencies" FROM ${DEPENDENCIES} AS dependencies @@ -18,6 +19,7 @@ ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ARG LIB_DIR ENV DATAFED_DEPENDENCIES_INSTALL_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH" @@ -30,10 +32,14 @@ ENV BUILD_DIR="$BUILD_DIR" ENV LIB_DIR="$LIB_DIR" ENV DATAFED_GLOBUS_REPO_USER="datafed" ENV DATAFED_DEFAULT_LOG_PATH="$DATAFED_INSTALL_PATH/logs" +ENV 
DATAFED_DEPENDENCIES_ROOT="$DATAFED_DEPENDENCIES_ROOT" + +RUN mkdir -p ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/dependency_versions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/utils.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ && \ + mv ./scripts/generate_dependencies_config.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/ -COPY --chown=datafed:root ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY --chown=datafed:root ./scripts/generate_authz_config.sh ${BUILD_DIR}/scripts/generate_authz_config.sh -COPY --chown=datafed:root ./scripts/utils.sh ${BUILD_DIR}/scripts/utils.sh COPY --chown=datafed:root ./CMakeLists.txt ${BUILD_DIR} COPY --chown=datafed:root ./cmake ${BUILD_DIR}/cmake COPY --chown=datafed:root ./repository/CMakeLists.txt ${BUILD_DIR}/repository/CMakeLists.txt @@ -48,10 +54,13 @@ COPY --chown=datafed:root ./scripts/generate_gsi-authz_config.sh ${BUILD_DI COPY --chown=datafed:root ./scripts/globus/create_guest_collection.py ${BUILD_DIR}/scripts/globus/create_guest_collection.py COPY --chown=datafed:root ./scripts/globus/utils.py ${BUILD_DIR}/scripts/globus/utils.py COPY --chown=datafed:root ./scripts/globus/__init__.py ${BUILD_DIR}/scripts/globus/__init__.py +COPY --chown=datafed:root ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ +RUN ${BUILD_DIR}/scripts/generate_datafed.sh RUN ${BUILD_DIR}/scripts/generate_gsi-authz_config.sh # Don't build with syslog the container does not seem to support syslog -RUN ${BUILD_DIR}/scripts/generate_authz_config.sh && \ +RUN ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh && \ + ${BUILD_DIR}/scripts/generate_authz_config.sh && \ ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. 
-B build \ -DBUILD_REPO_SERVER=False \ -DBUILD_AUTHZ_TESTS=True \ @@ -61,8 +70,9 @@ RUN ${BUILD_DIR}/scripts/generate_authz_config.sh && \ -DBUILD_WEB_SERVER=False \ -DBUILD_DOCS=False \ -DBUILD_PYTHON_CLIENT=False \ - -DBUILD_FOXX=False -RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build -j 8 + -DBUILD_FOXX=False \ + -DENABLE_INTEGRATION_TESTS=False +RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build -j 8 RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build --target install FROM ${GCS_IMAGE} @@ -71,6 +81,7 @@ ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ARG LIB_DIR ENV DATAFED_DEPENDENCIES_INSTALL_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH" @@ -83,6 +94,7 @@ ENV BUILD_DIR="$BUILD_DIR" ENV LIB_DIR="$LIB_DIR" ENV DATAFED_GLOBUS_REPO_USER="datafed" ENV DATAFED_DEFAULT_LOG_PATH="$DATAFED_INSTALL_PATH/logs" +ENV DATAFED_DEPENDENCIES_ROOT="$DATAFED_DEPENDENCIES_ROOT" # Value needed so tput command doesn't crash ENV TERM="xterm" ENV DATAFED_GCS_IP="" @@ -96,12 +108,13 @@ RUN mkdir -p ${DATAFED_INSTALL_PATH}/authz RUN mkdir -p /mnt/datafed # This is necessary for curl, which is used the entrypoint script -COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libssl.so ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libssl.so -COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libssl.so.1.1 ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libssl.so.1.1 -COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libcrypto.so ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libcrypto.so -COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libcrypto.so.1.1 ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib/libcrypto.so.1.1 +COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libssl.so ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libssl.so +COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libssl.so.3 
${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libssl.so.3 +COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libcrypto.so ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libcrypto.so +COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libcrypto.so.3 ${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib64/libcrypto.so.3 COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/python ${DATAFED_DEPENDENCIES_INSTALL_PATH}/python COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/python3.9 ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/python3.9 +COPY --from=dependencies --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts/ ${DATAFED_DEPENDENCIES_ROOT}/scripts/ # Needed for tests COPY --from=dependencies ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake @@ -119,15 +132,7 @@ RUN adduser --disabled-password --gecos "" datafed RUN echo "#!/bin/bash\n\$@" > /usr/bin/sudo && chmod +x /usr/bin/sudo -COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ -COPY ./scripts/utils.sh ${BUILD_DIR}/scripts/utils.sh - -RUN ${BUILD_DIR}/scripts/generate_datafed.sh - -COPY --chown=datafed:root ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY --chown=datafed:root ./scripts/generate_authz_config.sh ${BUILD_DIR}/scripts/generate_authz_config.sh -COPY --chown=datafed:root ./scripts/utils.sh ${BUILD_DIR}/scripts/utils.sh COPY --chown=datafed:root ./CMakeLists.txt ${BUILD_DIR} COPY --chown=datafed:root ./cmake ${BUILD_DIR}/cmake COPY --chown=datafed:root ./repository/CMakeLists.txt ${BUILD_DIR}/repository/CMakeLists.txt @@ -150,6 +155,9 @@ RUN apt update RUN apt-get install wget jq bind9-host build-essential -y RUN apt-get reinstall globus-connect-server54 libsqlite3-dev -y +COPY --chown=datafed:root ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ + +RUN ${BUILD_DIR}/scripts/generate_datafed.sh RUN 
${BUILD_DIR}/scripts/generate_gsi-authz_config.sh # Don't build with syslog the container does not seem to support syslog @@ -167,7 +175,7 @@ COPY --chown=datafed:root ./repository/docker/entrypoint_authz.sh ${BUILD_DIR}/r USER root -ENV LD_LIBRARY_PATH=/opt/datafed/dependencies/lib/ +ENV LD_LIBRARY_PATH=/opt/datafed/dependencies/lib/:/opt/datafed/dependencies/lib64/ # Remove --client-id from command because it was deprecated RUN sed -i '/--client-id/d' /entrypoint.sh diff --git a/repository/docker/entrypoint_authz.sh b/repository/docker/entrypoint_authz.sh index 18f4d1dbb..c3b1b693a 100755 --- a/repository/docker/entrypoint_authz.sh +++ b/repository/docker/entrypoint_authz.sh @@ -14,15 +14,15 @@ PROJECT_ROOT=$(realpath ${SOURCE}/../..) # The env variables below are needed for running globus-connect-server without # interactively logging in -export GCS_CLI_CLIENT_ID=$(jq -r .client "$local_DATAFED_ARANGO_PEM_PATH" +fi + +if [ -f "${local_DATAFED_ARANGO_CERT_PATH}" ]; then + if [ ${local_DATAFED_DATABASE_IP_ADDRESS} == "http://arango" ]; then + echo "WARNING - Discovered that Arango is set to use certificate: $local_DATAFED_ARANGO_CERT_PATH" + echo " switching DATAFED_DATABASE_IP_ADDRESS from http://arango to https://arango" + local_DATAFED_DATABASE_IP_ADDRESS="https://arango" + fi +else + if [ ${local_DATAFED_DATABASE_IP_ADDRESS} == "https://arango" ]; then + echo "WARNING - Discovered that Arango certs are not defined." 
+ echo " switching DATAFED_DATABASE_IP_ADDRESS from https://arango to http://arango" + local_DATAFED_DATABASE_IP_ADDRESS="http://arango" + fi +fi # Handle repo domain logic -if [ -z "$(get_env_value "DATAFED_COMPOSE_REPO_DOMAIN" "")" ]; then - if [ "${local_DATAFED_COMPOSE_DOMAIN}" = "localhost" ]; then - local_DATAFED_COMPOSE_REPO_DOMAIN="" +if [ -z "$(get_env_value "DATAFED_REPO_DOMAIN" "")" ]; then + if [ "${local_DATAFED_DOMAIN}" = "localhost" ]; then + local_DATAFED_REPO_DOMAIN="" else - local_DATAFED_COMPOSE_REPO_DOMAIN="${local_DATAFED_COMPOSE_DOMAIN}" + local_DATAFED_REPO_DOMAIN="${local_DATAFED_DOMAIN}" fi else - local_DATAFED_COMPOSE_REPO_DOMAIN=$(get_env_value "DATAFED_COMPOSE_REPO_DOMAIN" "") + local_DATAFED_REPO_DOMAIN=$(get_env_value "DATAFED_REPO_DOMAIN" "") fi -local_DATAFED_COMPOSE_REPO_FORM_PATH=$(get_env_value "DATAFED_COMPOSE_REPO_FORM_PATH" "") -local_DATAFED_COMPOSE_GLOBUS_APP_SECRET=$(get_env_value "DATAFED_COMPOSE_GLOBUS_APP_SECRET" "") -local_DATAFED_COMPOSE_GLOBUS_APP_ID=$(get_env_value "DATAFED_COMPOSE_GLOBUS_APP_ID" "") -local_DATAFED_GLOBUS_KEY_DIR=$(get_env_value "DATAFED_GLOBUS_KEY_DIR" "${COMPOSE_ENV_DIR}/globus") -local_DATAFED_COMPOSE_ZEROMQ_SESSION_SECRET=$(get_env_value "DATAFED_COMPOSE_ZEROMQ_SESSION_SECRET" "") -local_DATAFED_COMPOSE_HTTPS_SERVER_PORT=$(get_env_value "DATAFED_COMPOSE_HTTPS_SERVER_PORT" "443") -local_DATAFED_COMPOSE_CONTAINER_LOG_PATH=$(get_env_value "DATAFED_COMPOSE_CONTAINER_LOG_PATH" "/opt/datafed/logs") -local_DATAFED_COMPOSE_DATABASE_PASSWORD=$(get_env_value "DATAFED_COMPOSE_DATABASE_PASSWORD" "butterscotch") -local_DATAFED_COMPOSE_DATABASE_IP_ADDRESS=$(get_env_value "DATAFED_COMPOSE_DATABASE_IP_ADDRESS" "http://arango") -local_DATAFED_ENABLE_FOXX_TESTS=$(get_env_value "DATAFED_ENABLE_FOXX_TESTS" "FALSE") -local_DATAFED_COMPOSE_DATABASE_PORT=$(get_env_value "DATAFED_COMPOSE_DATABASE_PORT" "8529") -local_DATAFED_COMPOSE_GCS_IP=$(get_env_value "DATAFED_COMPOSE_GCS_IP" "") 
-local_DATAFED_HOST_COLLECTION_MOUNT=$(get_env_value "DATAFED_COMPOSE_HOST_COLLECTION_MOUNT" "$HOME/compose_collection") -local_DATAFED_COMPOSE_HOST_DEPLOYMENT_KEY_PATH=$(get_env_value "DATAFED_COMPOSE_HOST_DEPLOYMENT_KEY_PATH" "${local_DATAFED_GLOBUS_KEY_DIR}/deployment-key.json") -local_DATAFED_HOST_CRED_FILE_PATH=$(get_env_value "DATAFED_COMPOSE_HOST_CRED_FILE_PATH" "${local_DATAFED_GLOBUS_KEY_DIR}/client_cred.json") -local_DATAFED_GCS_COLLECTION_BASE_PATH=$(get_env_value "DATAFED_GCS_COLLECTION_BASE_PATH" "/") -local_DATAFED_GCS_COLLECTION_ROOT_PATH=$(get_env_value "DATAFED_GCS_COLLECTION_ROOT_PATH" "/mnt/datafed") -local_DATAFED_GLOBUS_CONTROL_PORT=$(get_env_value "DATAFED_GLOBUS_CONTROL_PORT" "443") -local_DATAFED_GLOBUS_SUBSCRIPTION=$(get_env_value "DATAFED_GLOBUS_SUBSCRIPTION" "") -local_DATAFED_CORE_LOG_LEVEL=$(get_env_value "DATAFED_CORE_LOG_LEVEL" "3") - # Make the logs folder if it doesn't exist mkdir -p "${COMPOSE_ENV_DIR}/logs" @@ -208,40 +297,49 @@ touch "$ENV_FILE_PATH" if [ "${BUILD_METADATA}" == "TRUE" ] || [ "${BUILD_REPO}" == "TRUE" ]; then cat <>"$ENV_FILE_PATH" -DATAFED_HTTPS_SERVER_PORT=${local_DATAFED_COMPOSE_HTTPS_SERVER_PORT} -DATAFED_DOMAIN=${local_DATAFED_COMPOSE_DOMAIN} -DATAFED_UID=$(id -u) -DATAFED_CONTAINER_LOG_PATH=${local_DATAFED_COMPOSE_CONTAINER_LOG_PATH} +DATAFED_HTTPS_SERVER_PORT=${local_DATAFED_HTTPS_SERVER_PORT} +DATAFED_DOMAIN=${local_DATAFED_DOMAIN} +DATAFED_UID=${local_DATAFED_UID} +DATAFED_CONTAINER_LOG_PATH=${local_DATAFED_CONTAINER_LOG_PATH} DATAFED_CORE_LOG_LEVEL=${local_DATAFED_CORE_LOG_LEVEL} EOF fi if [ "${BUILD_METADATA}" == "TRUE" ]; then cat <>"$ENV_FILE_PATH" -DATAFED_GLOBUS_APP_SECRET=${local_DATAFED_COMPOSE_GLOBUS_APP_SECRET} -DATAFED_GLOBUS_APP_ID=${local_DATAFED_COMPOSE_GLOBUS_APP_ID} -DATAFED_ZEROMQ_SESSION_SECRET=${local_DATAFED_COMPOSE_ZEROMQ_SESSION_SECRET} -DATAFED_WEB_CERT_PATH=/opt/datafed/keys/${local_DATAFED_WEB_CERT_NAME} -DATAFED_WEB_KEY_PATH=/opt/datafed/keys/${local_DATAFED_WEB_KEY_NAME} 
-DATAFED_DATABASE_PASSWORD=${local_DATAFED_COMPOSE_DATABASE_PASSWORD} -DATAFED_DATABASE_IP_ADDRESS=${local_DATAFED_COMPOSE_DATABASE_IP_ADDRESS} -DATAFED_DATABASE_PORT=${local_DATAFED_COMPOSE_DATABASE_PORT} +DATAFED_GLOBUS_APP_SECRET=${local_DATAFED_GLOBUS_APP_SECRET} +DATAFED_GLOBUS_APP_ID=${local_DATAFED_GLOBUS_APP_ID} +DATAFED_ZEROMQ_SESSION_SECRET=${local_DATAFED_ZEROMQ_SESSION_SECRET} +DATAFED_WEB_CERT_PATH=${local_DATAFED_WEB_CERT_PATH} +DATAFED_WEB_KEY_PATH=${local_DATAFED_WEB_KEY_PATH} +DATAFED_DATABASE_PASSWORD=${local_DATAFED_DATABASE_PASSWORD} +DATAFED_DATABASE_IP_ADDRESS=${local_DATAFED_DATABASE_IP_ADDRESS} +DATAFED_DATABASE_PORT=${local_DATAFED_DATABASE_PORT} DATAFED_ENABLE_FOXX_TESTS=${local_DATAFED_ENABLE_FOXX_TESTS} EOF + + if [[ -n "${local_DATAFED_ARANGO_KEY_PATH}" || "${ARANGO_USE_SSL}" == "TRUE" ]]; then + echo "DATAFED_ARANGO_KEY_PATH=${local_DATAFED_ARANGO_KEY_PATH}" >>$ENV_FILE_PATH + fi + if [[ -n "${local_DATAFED_ARANGO_CERT_PATH}" || "${ARANGO_USE_SSL}" == "TRUE" ]]; then + echo "DATAFED_ARANGO_CERT_PATH=${local_DATAFED_ARANGO_CERT_PATH}" >>$ENV_FILE_PATH + fi + if [[ -n "${local_DATAFED_ARANGO_PEM_PATH}" || "${ARANGO_USE_SSL}" == "TRUE" ]]; then + echo "DATAFED_ARANGO_PEM_PATH=${local_DATAFED_ARANGO_PEM_PATH}" >>$ENV_FILE_PATH + fi fi if [ "${BUILD_REPO}" == "TRUE" ]; then cat <>"$ENV_FILE_PATH" -DATAFED_REPO_USER=datafed -DATAFED_GCS_ROOT_NAME=DataFed_Compose -DATAFED_GCS_IP=${local_DATAFED_COMPOSE_GCS_IP} -DATAFED_REPO_ID_AND_DIR=compose-home +DATAFED_GCS_ROOT_NAME=${local_DATAFED_GCS_ROOT_NAME} +DATAFED_GCS_IP=${local_DATAFED_GCS_IP} +DATAFED_REPO_ID_AND_DIR=${local_DATAFED_REPO_ID_AND_DIR} DATAFED_HOST_COLLECTION_MOUNT=${local_DATAFED_HOST_COLLECTION_MOUNT} -DATAFED_HOST_DEPLOYMENT_KEY_PATH=${local_DATAFED_COMPOSE_HOST_DEPLOYMENT_KEY_PATH} +DATAFED_HOST_DEPLOYMENT_KEY_PATH=${local_DATAFED_HOST_DEPLOYMENT_KEY_PATH} DATAFED_HOST_CRED_FILE_PATH=${local_DATAFED_HOST_CRED_FILE_PATH} 
DATAFED_GLOBUS_CONTROL_PORT=${local_DATAFED_GLOBUS_CONTROL_PORT} DATAFED_GLOBUS_SUBSCRIPTION=${local_DATAFED_GLOBUS_SUBSCRIPTION} -DATAFED_REPO_DOMAIN=${local_DATAFED_COMPOSE_REPO_DOMAIN} +DATAFED_REPO_DOMAIN=${local_DATAFED_REPO_DOMAIN} DATAFED_GCS_COLLECTION_BASE_PATH=${local_DATAFED_GCS_COLLECTION_BASE_PATH} DATAFED_GCS_COLLECTION_ROOT_PATH=${local_DATAFED_GCS_COLLECTION_ROOT_PATH} EOF @@ -260,4 +358,4 @@ done <"$ENV_FILE_PATH" chmod +x "$unset_env_file_name" -echo "Successfully created/updated .env file at: $ENV_FILE_PATH" +echo "INFO - Successfully created/updated .env file at: $ENV_FILE_PATH" diff --git a/scripts/compose_generate_globus_files.sh b/scripts/compose_generate_globus_files.sh index 9af8903bb..6b1365696 100755 --- a/scripts/compose_generate_globus_files.sh +++ b/scripts/compose_generate_globus_files.sh @@ -1,9 +1,9 @@ #!/bin/bash -SCRIPT=$(realpath "$0") +SCRIPT=$(realpath "${BASH_SOURCE[0]}") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/..") -source "${SOURCE}/dependency_versions.sh" +source "${PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" # This script should be run after generating the .env file as it will pull # values from the .env file diff --git a/scripts/compose_generate_web_server_globus_credentials.sh b/scripts/compose_generate_web_server_globus_credentials.sh new file mode 100755 index 000000000..6d8c83cb8 --- /dev/null +++ b/scripts/compose_generate_web_server_globus_credentials.sh @@ -0,0 +1,87 @@ +#!/bin/bash +SCRIPT=$(realpath "${BASH_SOURCE[0]}") +SOURCE=$(dirname "$SCRIPT") +PROJECT_ROOT=$(realpath "${SOURCE}/..") + +source "${PROJECT_ROOT}/scripts/dependency_versions.sh" + +# This script should be run after generating the .env file as it will pull +# values from the .env file +Help() { + echo "$(basename $0) generate credentials for the DataFed Web Server." + echo "Note the .env file must exist in the specified directory." 
+ echo + echo "Syntax: $(basename $0) [-h|d]" + echo "options:" + echo "-h, --help Print this help message" + echo "-d, --directory Directory where globus folder will be" + echo " created." +} + +VALID_ARGS=$(getopt -o hd: --long 'help',directory: -- "$@") + +DIRECTORY="" +eval set -- "$VALID_ARGS" +while [ : ]; do + case "$1" in + -h | --help) + Help + exit 0 + ;; + -d | --directory) + DIRECTORY="$2" + shift 2 + ;; + --) + shift + break + ;; + \?) # incorrect option + echo "Error: Invalid option" + exit + ;; + esac +done + +if [ ! -d "${DIRECTORY}" ]; then + echo "The provided directory does not seem to exist: ${DIRECTORY}" +fi + +if [ ! -f "${DIRECTORY}/.env" ]; then + echo "Missing . ${DIRECTORY}/.env file. This file needs to be" + echo "created first" + exit 1 +fi + +# This script should be run after generating the .env file as it will pull +# values from the .env file + +if [ ! -f "${DIRECTORY}/.env" ]; then + echo "Missing . ${DIRECTORY}/.env file. This file needs to be" + echo "created first" + exit 1 +fi + +local_DATAFED_GLOBUS_KEY_DIR="${DIRECTORY}/globus" +if [ ! -d "$local_DATAFED_GLOBUS_KEY_DIR" ]; then + mkdir -p "$local_DATAFED_GLOBUS_KEY_DIR" +fi + +# Because docker compose honors spaces and reads in .env files as literals +# we cannot include the quotes for variables that have spaces. So we need to +# convert this file such that it is in a format that can be readable by bash +# before loading it into the env + +cp "${DIRECTORY}/.env" "${DIRECTORY}/.env_shell" + +sed -i 's/=\([^"]*\)/="\1"/' "${DIRECTORY}/.env_shell" + +. 
"${DIRECTORY}/.env_shell" + +# Cleanup after loading env +rm "${DIRECTORY}/.env_shell" + +DATAFED_GLOBUS_REDIRECT_CRED_FILE_PATH="$DATAFED_GLOBUS_REDIRECT_CRED_FILE_PATH" \ + DATAFED_GCS_ROOT_NAME="$DATAFED_GCS_ROOT_NAME" \ + DATAFED_DOMAIN="$DATAFED_DOMAIN" \ + "python${DATAFED_PYTHON_VERSION}" "${PROJECT_ROOT}/scripts/globus/generate_web_server_credentials.py" diff --git a/scripts/dependency_install_functions.sh b/scripts/dependency_install_functions.sh deleted file mode 100644 index 0d75e4c11..000000000 --- a/scripts/dependency_install_functions.sh +++ /dev/null @@ -1,727 +0,0 @@ -#!/bin/bash -SCRIPT=$(realpath "$BASH_SOURCE[0]") -SOURCE=$(dirname "$SCRIPT") -source "${SOURCE}/dependency_versions.sh" -PROJECT_ROOT=$(realpath "${SOURCE}/..") -source "${SOURCE}/utils.sh" - -# Ensures the shell returns the exit code of the first failed command in a pipeline -set -o pipefail - -sudo_command -# these are the dependencies to be installed by apt -export apt_file_path="${PROJECT_ROOT}/tmp/apt_deps" -export pip_file_path="${PROJECT_ROOT}/tmp/pip_deps" -# these are the dependencies to be installed and built via cmake -export ext_file_path="${PROJECT_ROOT}/tmp/ext_deps" - -if [ ! -d "${PROJECT_ROOT}/tmp" ]; then - mkdir -p "${PROJECT_ROOT}/tmp" -fi - -if [ ! -e "${PROJECT_ROOT}/config/datafed.sh" ]; then - echo "Please run generate_datafed.sh before installing dependencies" - exit 1 -fi - -source "${PROJECT_ROOT}/config/datafed.sh" - -if [ ! -e "$DATAFED_DEPENDENCIES_INSTALL_PATH" ] || [ ! -d "$DATAFED_DEPENDENCIES_INSTALL_PATH" ]; then - parent_dir=$(dirname "${DATAFED_DEPENDENCIES_INSTALL_PATH}") - if [ -w "${parent_dir}" ]; then - mkdir -p "$DATAFED_DEPENDENCIES_INSTALL_PATH" - else - echo "Sudo command $SUDO_CMD" - "$SUDO_CMD" mkdir -p "$DATAFED_DEPENDENCIES_INSTALL_PATH" - user=$(whoami) - "$SUDO_CMD" chown "$user" "$DATAFED_DEPENDENCIES_INSTALL_PATH" - fi -fi - -# NOTE - LD_LIBRARY_PATH must not be a variable for this to work. You cannot -# replace ! 
-v LD_LIBRARY_PATH with ! -v ${LD_LIBRARY_PATH} because this is -# checking if the variable even exists. -if [[ ! -v LD_LIBRARY_PATH ]]; then - LD_LIBRARY_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib" -else - if [[ -n "$LD_LIBRARY_PATH" ]]; then - LD_LIBRARY_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib:$LD_LIBRARY_PATH" - else - LD_LIBRARY_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib" - fi -fi - -# This if statement is to make sure PKG_CONFIG_PATH is defined for cmake, and -# that it contains the necessary paths from the datafed depedencies install path -# to compile other dependencies -if [[ ! -v PKG_CONFIG_PATH ]]; then - PKG_CONFIG_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib/pkgconfig" -else - if [[ -n "$PKG_CONFIG_PATH" ]]; then - PKG_CONFIG_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib/pkgconfig:$PKG_CONFIG_PATH" - else - PKG_CONFIG_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/lib/pkgconfig" - fi -fi - -# WARNING: overwriting PATH can be very dangerous -# In Docker builds this must follow the pattern: -# PATH=":$PATH" -# Curly braces around PATH, like ${PATH} may pull from the host's PATH -# Please see StackOverflow answer: https://stackoverflow.com/a/38742545 -if [[ ! -v PATH ]]; then - PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/bin" -else - if [[ -n "$PATH" ]]; then - PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/bin:$PATH" - else - PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH/bin" - fi -fi - -# Function to clean up multiple installation flag files with a given prefix -clean_install_flags() { - local install_path="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - local prefix="$1" # The first argument is now the prefix - - # Validate that a prefix was provided - if [ -z "$prefix" ]; then - echo "Error: No prefix provided for clean_install_flags." 
>&2 - return 1 # Indicate an error - fi - - # Count files matching the pattern - local count=$(find "${install_path}" -maxdepth 1 -type f -name "${prefix}*" 2>/dev/null | wc -l) - - if [ "${count}" -gt 1 ]; then - echo "Warning: Found ${count} installation flag files with prefix '${prefix}'. Cleaning up..." - # Remove all files matching the pattern - find "${install_path}" -maxdepth 1 -type f -name "${prefix}*" -delete - echo "Removed all existing installation flag files with prefix '${prefix}'." - fi -} - -install_python() { - local original_dir=$(pwd) - - local PYTHON_FLAG_PREFIX=".python_installed-" - clean_install_flags "$PYTHON_FLAG_PREFIX" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${PYTHON_FLAG_PREFIX}${DATAFED_PYTHON_VERSION}" ]; then - local original_dir=$(pwd) - - # Check if openssl is already installed, otherwise error since openssl is required - local OPENSSL_FLAG_PREFIX=".openssl_installed-" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${OPENSSL_FLAG_PREFIX}${DATAFED_OPENSSL}" ]; then - echo "You must first install openssl before installing python" - exit 1 - fi - - cd "${PROJECT_ROOT}" - "$SUDO_CMD" apt update - "$SUDO_CMD" apt install -y build-essential libreadline-dev zlib1g-dev libffi-dev wget libsqlite3-dev - - wget "https://www.python.org/ftp/python/${DATAFED_PYTHON_VERSION_FULL}/Python-${DATAFED_PYTHON_VERSION_FULL}.tgz" - tar -xf "Python-${DATAFED_PYTHON_VERSION_FULL}.tgz" - cd "Python-${DATAFED_PYTHON_VERSION_FULL}" - - export CPPFLAGS="-I${DATAFED_DEPENDENCIES_INSTALL_PATH}/include $CPPFLAGS" - export LDFLAGS="-L${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib -Wl,-rpath,${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib $LDFLAGS" - ./configure --prefix="${DATAFED_PYTHON_DEPENDENCIES_DIR}" --with-openssl="${DATAFED_DEPENDENCIES_INSTALL_PATH}" --with-openssl-rpath=auto --enable-loadable-sqlite-extensions - make -j$(nproc) - make altinstall - - mkdir -p "${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin" - # Delete link if it exists - rm -rf 
"${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/python${DATAFED_PYTHON_VERSION}" - ln -s "${DATAFED_PYTHON_DEPENDENCIES_DIR}/bin/python${DATAFED_PYTHON_VERSION}" "${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/python${DATAFED_PYTHON_VERSION}" - export PYTHON="${DATAFED_PYTHON_DEPENDENCIES_DIR}/bin/python${DATAFED_PYTHON_VERSION}" - - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${PYTHON_FLAG_PREFIX}${DATAFED_PYTHON_VERSION}" - cd "$original_dir" - else - echo "Python already installed, skipping..." - fi -} - -init_python() { - - if [[ ! -v DATAFED_PYTHON_DEPENDENCIES_DIR ]]; then - echo "DATAFED_PYTHON_DEPENDENCIES_DIR is not defined please make sure it is defined in the ${PROJECT_ROOT}/config/datafed.sh file." - exit 1 - else - if [[ -z "$DATAFED_PYTHON_DEPENDENCIES_DIR" ]]; then - echo "DATAFED_PYTHON_DEPENDENCIES_DIR is defined but is empty please make sure it is defined in ${PROJECT_ROOT}/config/datafed.sh file." - exit 1 - fi - fi - - if [ ! -e "$DATAFED_DEPENDENCIES_INSTALL_PATH" ] || [ ! -d "$DATAFED_PYTHON_DEPENDENCIES_DIR" ]; then - mkdir -p "$DATAFED_PYTHON_DEPENDENCIES_DIR" - fi - - "python${DATAFED_PYTHON_VERSION}" -m venv "${DATAFED_PYTHON_ENV}" - # Make sure that pip is installed and upgraded - "python${DATAFED_PYTHON_VERSION}" -m ensurepip --upgrade -} - -install_cmake() { - - local CMAKE_FLAG_PREFIX=".cmake_installed-" - clean_install_flags "$CMAKE_FLAG_PREFIX" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${CMAKE_FLAG_PREFIX}${DATAFED_CMAKE_VERSION}" ]; then - # Version 3.20 of cmake and onwards starting using all lower case in the package names, previos versions use a - # a capital L in the name. 
- wget https://github.com/Kitware/CMake/releases/download/v${DATAFED_CMAKE_VERSION}/cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64.tar.gz - tar -xzvf "cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64.tar.gz" >/dev/null 2>&1 - cp -r "cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64/bin" "${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cp -r "cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64/share" "${DATAFED_DEPENDENCIES_INSTALL_PATH}" - - # Cleanup - rm -rf "cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64" - rm -rf "cmake-${DATAFED_CMAKE_VERSION}-linux-x86_64.tar.gz" - - # Mark cmake as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${CMAKE_FLAG_PREFIX}${DATAFED_CMAKE_VERSION}" - fi - # WARNING: overwriting PATH can be very dangerous - # In Docker builds this must follow the pattern: - # PATH=":$PATH" - # Curly braces around PATH, like ${PATH} may pull from the host's PATH - # Please see StackOverflow answer: https://stackoverflow.com/a/38742545 - export PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin:$PATH" -} - -install_protobuf() { - local PROTOBUF_FLAG_PREFIX=".protobuf_installed-" - clean_install_flags "$PROTOBUF_FLAG_PREFIX" - if [ ! 
-e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${PROTOBUF_FLAG_PREFIX}${DATAFED_PROTOBUF_VERSION}" ]; then - local original_dir=$(pwd) - cd "${PROJECT_ROOT}" - if [ -d "${PROJECT_ROOT}/external/protobuf" ]; then - # sudo required because of egg file - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/protobuf" - fi - # Here we are using clone instead of submodule update, because submodule - # requires the .git folder exist and the current folder be considered a repo - # this creates problems in docker because each time a commit is made the - # .git folder contents are changed causing a fresh rebuild of all containers - git clone "https://github.com/protocolbuffers/protobuf.git" \ - "${PROJECT_ROOT}/external/protobuf" - - cd "${PROJECT_ROOT}/external/protobuf" - git checkout "v${DATAFED_PROTOBUF_VERSION}" - git submodule update --init --recursive - # Build static library, cannot build shared library at same time apparently - # there cannot be a shared libsodium file in the - # DATAFED_DEPENDENCIES_INSTALL_PREFIX if you want to have everything static - # libzmq picks up any shared file regardless of whether you have told it to - # only use static libraries or not. - # NOTE - static libraries must be built first - cmake -S . 
-B build \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DBUILD_SHARED_LIBS=OFF \ - -Dprotobuf_BUILD_TESTS=OFF \ - -DABSL_PROPAGATE_CXX_STD=ON \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - # Build Shared library - # Don't build shared, it messes up the static library linking because the - # cmake file installed are not compatible - # WARNING - static library will break if build with shared options on - - cd python - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - LD_LIBRARY_PATH="$LD_LIBRARY_PATH" PATH="$PATH" python${DATAFED_PYTHON_VERSION} -m pip install numpy tzdata - LD_LIBRARY_PATH="$LD_LIBRARY_PATH" PATH="$PATH" python${DATAFED_PYTHON_VERSION} setup.py build - LD_LIBRARY_PATH="$LD_LIBRARY_PATH" PATH="$PATH" python${DATAFED_PYTHON_VERSION} setup.py test - # Because we have activaited a venv we don't want to use the --user flag - # with the install command - LD_LIBRARY_PATH="$LD_LIBRARY_PATH" PATH="$PATH" "python${DATAFED_PYTHON_VERSION}" setup.py install - cd ../ - # Cleanup build file with root ownership - if [ -f build/install_manifest.txt ]; then - "$SUDO_CMD" rm build/install_manifest.txt - fi - cd "${PROJECT_ROOT}" - - # Mark protobuf as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${PROTOBUF_FLAG_PREFIX}${DATAFED_PROTOBUF_VERSION}" - cd "$original_dir" - fi -} - -install_libsodium() { - local LIBSODIUM_FLAG_PREFIX=".libsodium_installed-" - clean_install_flags "$LIBSODIUM_FLAG_PREFIX" - if [ ! 
-e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${LIBSODIUM_FLAG_PREFIX}${DATAFED_LIBSODIUM_VERSION}" ]; then - local original_dir=$(pwd) - if [ -d "${PROJECT_ROOT}/external/libsodium" ]; then - # sudo required because of egg file - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/libsodium" - fi - # Official documentation for libsodium indicates this is the preferred way to build libsodium. - # Using the git repo directly results in build instability because of additional network calls when running - # autogen.sh. - wget "https://download.libsodium.org/libsodium/releases/libsodium-${DATAFED_LIBSODIUM_VERSION}.tar.gz" -P "${PROJECT_ROOT}/external" - tar -xvzf "${PROJECT_ROOT}/external/libsodium-${DATAFED_LIBSODIUM_VERSION}.tar.gz" -C "${PROJECT_ROOT}/external/" - cd "${PROJECT_ROOT}/external/libsodium-${DATAFED_LIBSODIUM_VERSION}" - # Build static ONLY!!!! - # Note if zmq detects a shared sodium library it will grab it no matter what - # --enable-shared=no must be set here - SODIUM_STATIC=1 ./configure --enable-static=yes --enable-shared=no --with-pic=yes --prefix="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - make -j 8 - make check - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - make install - else - "$SUDO_CMD" make install - fi - - # Mark libsodium as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${LIBSODIUM_FLAG_PREFIX}${DATAFED_LIBSODIUM_VERSION}" - cd "$original_dir" - fi -} - -install_libzmq() { - local LIBZMQ_FLAG_PREFIX=".libzmq_installed-" - clean_install_flags "$LIBZMQ_FLAG_PREFIX" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${LIBZMQ_FLAG_PREFIX}${DATAFED_LIBZMQ_VERSION}" ]; then - local original_dir=$(pwd) - if [ -d "${PROJECT_ROOT}/external/libzmq" ]; then - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/libzmq" - fi - if [ ! 
-e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.libsodium_installed-${DATAFED_LIBSODIUM_VERSION}" ]; then - echo "You must first install libsodium before installing libzmq" - exit 1 - fi - # Here we are using clone instead of submodule update, because submodule - # requires the .git folder exist and the current folder be considered a repo - # this creates problems in docker because each time a commit is made the - # .git folder contents are changed causing a fresh rebuild of all containers - git clone https://github.com/zeromq/libzmq.git "${PROJECT_ROOT}/external/libzmq" - cd "${PROJECT_ROOT}/external/libzmq" - git checkout "v${DATAFED_LIBZMQ_VERSION}" - # Build static only - cmake -S. -B build \ - -DBUILD_STATIC=ON \ - -DBUILD_SHARED_LIBS=OFF \ - -DBUILD_SHARED=OFF \ - -DWITH_LIBSODIUM_STATIC=ON \ - -DBUILD_TESTS=OFF \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DCMAKE_PREFIX_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}/lib" \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - - if [ -d "${PROJECT_ROOT}/external/cppzmq" ]; then - # sudo required because of egg file - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/cppzmq" - fi - git clone https://github.com/zeromq/cppzmq.git "${PROJECT_ROOT}/external/cppzmq" - cd "${PROJECT_ROOT}/external/cppzmq" - git checkout v"${DATAFED_LIB_ZMQCPP_VERSION}" - # Will will not build the unit tests because there are not enough controls - # to link to the correct static library. - # NOTE - static libraries must be built first - cmake -S. 
-B build \ - -DBUILD_SHARED_LIBS=OFF \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DCPPZMQ_BUILD_TESTS=OFF \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - - cd "$original_dir" - # Mark libzmq as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${LIBZMQ_FLAG_PREFIX}${DATAFED_LIBZMQ_VERSION}" - fi -} - -install_nlohmann_json() { - local NLOHMANN_FLAG_PREFIX=".nlohmann_json_installed-" - clean_install_flags "$NLOHMANN_FLAG_PREFIX" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NLOHMANN_FLAG_PREFIX}${DATAFED_NLOHMANN_JSON_VERSION}" ]; then - local original_dir=$(pwd) - if [ -d "${PROJECT_ROOT}/external/json" ]; then - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/json" - fi - git clone https://github.com/nlohmann/json.git "${PROJECT_ROOT}/external/json" - cd "${PROJECT_ROOT}/external/json" - git checkout v${DATAFED_NLOHMANN_JSON_VERSION} - echo "FILE STRUCTURE $(ls)" - # Build static - cmake -S . -B build \ - -DBUILD_SHARED_LIBS=OFF \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - # Build shared - cmake -S . 
-B build \ - -DBUILD_SHARED_LIBS=ON \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - - # Mark nlohmann_json as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NLOHMANN_FLAG_PREFIX}${DATAFED_NLOHMANN_JSON_VERSION}" - cd "$original_dir" - fi -} - -install_json_schema_validator() { - local NLOHMANN_SCHEMA_FLAG_PREFIX=".nlohmann_schema_validator_installed-" - clean_install_flags "$NLOHMANN_SCHEMA_FLAG_PREFIX" - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NLOHMANN_SCHEMA_FLAG_PREFIX}${DATAFED_JSON_SCHEMA_VALIDATOR_VERSION}" ]; then - local original_dir=$(pwd) - if [ -d "${PROJECT_ROOT}/external/json-schema-validator" ]; then - "$SUDO_CMD" rm -rf "${PROJECT_ROOT}/external/json-schema-validator" - fi - git clone https://github.com/pboettch/json-schema-validator "${PROJECT_ROOT}/external/json-schema-validator" - cd "${PROJECT_ROOT}/external/json-schema-validator" - git checkout ${DATAFED_JSON_SCHEMA_VALIDATOR_VERSION} - # Build static - cmake -S . 
-B build \ - -DBUILD_SHARED_LIBS=OFF \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DCMAKE_INSTALL_PREFIX="${DATAFED_DEPENDENCIES_INSTALL_PATH}" - cmake --build build -j 8 - if [ -w "${DATAFED_DEPENDENCIES_INSTALL_PATH}" ]; then - cmake --build build --target install - else - "$SUDO_CMD" cmake --build build --target install - fi - # WARNING building shared library will overwrite cmake file for static - # library, does not appear to support both targets at the same time, similar - # to protobuf - # Mark json-schema-validator as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NLOHMANN_SCHEMA_FLAG_PREFIX}${DATAFED_JSON_SCHEMA_VALIDATOR_VERSION}" - cd "$original_dir" - fi -} - -install_gcs() { - local GCS_FLAG_PREFIX=".gcs_installed-" - clean_install_flags "$GCS_FLAG_PREFIX" - if [ ! -e "${GCS_FLAG_PREFIX}${DATAFED_GLOBUS_VERSION}" ]; then - "$SUDO_CMD" apt update - "$SUDO_CMD" apt install -y curl git gnupg - curl -LOs \ - "https://downloads.globus.org/globus-connect-server/stable/installers/repo/deb/globus-repo_${DATAFED_GLOBUS_VERSION}_all.deb" - "$SUDO_CMD" dpkg -i "globus-repo_${DATAFED_GLOBUS_VERSION}_all.deb" - "$SUDO_CMD" apt-key add /usr/share/globus-repo/RPM-GPG-KEY-Globus - # Need a second update command after adding the globus GPG key - "$SUDO_CMD" apt update - "$SUDO_CMD" apt-get install globus-connect-server54 -y - - # Mark gcs as installed - touch "${GCS_FLAG_PREFIX}${DATAFED_GLOBUS_VERSION}" - fi -} - -install_nvm() { - local NVM_FLAG_PREFIX=".nvm_installed-" - clean_install_flags "$NVM_FLAG_PREFIX" - # By default this will place NVM in $HOME/.nvm - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NVM_FLAG_PREFIX}${DATAFED_NVM_VERSION}" ]; then - # By setting NVM_DIR beforehand when the scirpt is run it - # will use it to set the install path - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - mkdir -p "${NVM_DIR}" - # --fail makes curl return a non-zero exit code for HTTP errors like 404 or 500. 
- curl --fail -o- "https://raw.githubusercontent.com/nvm-sh/nvm/${DATAFED_NVM_VERSION}/install.sh" | bash - # Mark nvm as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NVM_FLAG_PREFIX}${DATAFED_NVM_VERSION}" - else - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - fi -} - -install_ws_node_packages() { - - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.nvm_installed-${DATAFED_NVM_VERSION}" ]; then - echo "You must first install nvm before installing ws node packages." - exit 1 - fi - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.node_installed-${DATAFED_NODE_VERSION}" ]; then - echo "You must first install node before installing ws node packages" - exit 1 - fi - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.cmake_installed-${DATAFED_CMAKE_VERSION}" ]; then - echo "You must first install cmake before installing ws node packages" - exit 1 - fi - - # Configure the package.json.in file -> package.json - cmake -P "${PROJECT_ROOT}/cmake/Web.cmake" - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - export NODE_VERSION="$DATAFED_NODE_VERSION" - "$NVM_DIR/nvm-exec" npm --prefix "${PROJECT_ROOT}/web" install "${PROJECT_ROOT}/web" -} - -install_node() { - local NODE_FLAG_PREFIX=".node_installed-" - clean_install_flags "$NODE_FLAG_PREFIX" - # By default this will place NVM in $HOME/.nvm - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NODE_FLAG_PREFIX}${DATAFED_NODE_VERSION}" ]; then - local original_dir=$(pwd) - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.nvm_installed-${DATAFED_NVM_VERSION}" ]; then - echo "You must first install nvm before installing node." - exit 1 - fi - - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - - [ -s "$NVM_DIR/nvm.sh" ] && . 
"$NVM_DIR/nvm.sh" # This loads nvm - nvm install "$DATAFED_NODE_VERSION" - nvm use "$DATAFED_NODE_VERSION" - # Mark node as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${NODE_FLAG_PREFIX}${DATAFED_NODE_VERSION}" - cd "$original_dir" - else - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - # Used by nvm - export NODE_VERSION="$DATAFED_NODE_VERSION" - [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm - nvm use "$DATAFED_NODE_VERSION" - fi - echo "NODE VERSION USED/INSTALLED $DATAFED_NODE_VERSION" -} - -install_foxx_cli() { - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.nvm_installed-${DATAFED_NVM_VERSION}" ]; then - echo "You must first install nvm before installing foxx_cli." - exit 1 - fi - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/.node_installed-${DATAFED_NODE_VERSION}" ]; then - echo "You must first install node before installing foxx_cli" - exit 1 - fi - local FOXX_FLAG_PREFIX=".foxx_cli_installed-" - clean_install_flags "$FOXX_FLAG_PREFIX" - # By default this will place NVM in $HOME/.nvm - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${FOXX_FLAG_PREFIX}" ]; then - local original_dir=$(pwd) - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm - export NODE_VERSION="$DATAFED_NODE_VERSION" - "$NVM_DIR/nvm-exec" npm install --global foxx-cli --prefix "${DATAFED_DEPENDENCIES_INSTALL_PATH}/npm" - # Mark foxx_cli as installed - touch "${DATAFED_DEPENDENCIES_INSTALL_PATH}/${FOXX_FLAG_PREFIX}" - cd "$original_dir" - else - export NVM_DIR="${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm" - [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm - export NODE_VERSION="$DATAFED_NODE_VERSION" - - # check that foxx can be found - if [ ! -d "${DATAFED_DEPENDENCIES_INSTALL_PATH}/npm" ]; then - echo "Something went wrong Foxx is supposed to be installed i.e. " - echo "(${DATAFED_DEPENDENCIES_INSTALL_PATH}/.foxx_cli_installed) " - echo "exists. 
But there is no npm folder in: ${DATAFED_DEPENDENCIES_INSTALL_PATH}" - exit 1 - fi - if [ ! -e "${DATAFED_DEPENDENCIES_INSTALL_PATH}/npm/bin/foxx" ]; then - echo "Something went wrong Foxx is supposed to be installed i.e. " - echo "(${DATAFED_DEPENDENCIES_INSTALL_PATH}/.foxx_cli_installed) " - echo "exists. But there is no foxx binary here: ${DATAFED_DEPENDENCIES_INSTALL_PATH}/npm/bin/foxx" - exit 1 - fi - fi -} - -install_arangodb() { - curl -OL https://download.arangodb.com/arangodb312/DEBIAN/Release.key - "$SUDO_CMD" apt-key add - >"$apt_file_path" - echo -n "${pip_packages[@]} " >>"$pip_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - local_UNIFY=true - ;; - *) - echo "Invalid Argument" - ;; - esac -fi - -if [[ $local_UNIFY = false ]]; then - sudo_command - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done - - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -fi diff --git a/scripts/install_client_dependencies.sh b/scripts/install_client_dependencies.sh deleted file mode 100755 index aa3fdc033..000000000 --- a/scripts/install_client_dependencies.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/..") - -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("pkg-config") -pip_packages=("setuptools") -externals=("cmake" "libopenssl" "python" "protobuf") - -sudo_command -# This script will install all of the dependencies needed by DataFed 1.0 -"$SUDO_CMD" apt-get update -"$SUDO_CMD" dpkg --configure -a -"$SUDO_CMD" apt-get install -y "${packages[@]}" - -for ext in "${externals[@]}"; do - install_dep_by_name "$ext" -done - -init_python -source 
"${DATAFED_PYTHON_ENV}/bin/activate" -"python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -"python${DATAFED_PYTHON_VERSION}" -m pip install -r "${PROJECT_ROOT}/python/datafed_pkg/requirements.txt" - -cd ~ diff --git a/scripts/install_core_dependencies.sh b/scripts/install_core_dependencies.sh deleted file mode 100755 index 039123b02..000000000 --- a/scripts/install_core_dependencies.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) - -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("libtool" "build-essential" "g++" "gcc" "make" "libboost-all-dev" "libboost-program-options-dev" "pkg-config" "autoconf" "automake" "unzip" "wget" "rapidjson-dev" "libkrb5-dev" "git") -pip_packages=("setuptools") -# NOTE the order matters here -externals=("cmake" "libopenssl" "python" "nlohmann_json" "json_schema_validator" "protobuf" - "libsodium" "libzmq" "zlib" "libcurl") - -local_UNIFY=false - -if [ $# -eq 1 ]; then - case "$1" in - -h | --help) - # If -h or --help is provided, print help - echo "Usage: $0 [-h|--help] [unify]" - ;; - unify) - # If 'unify' is provided, print the packages - # The extra space is necessary to not conflict with the other install scripts - echo -n "${packages[@]} " >>"$apt_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - echo -n "${pip_packages[@]} " >>"$pip_file_path" - local_UNIFY=true - ;; - *) - echo "Invalid Argument" - ;; - esac -fi - -if [[ $local_UNIFY = false ]]; then - sudo_command - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done - - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -fi diff --git 
a/scripts/install_dependencies.sh b/scripts/install_dependencies.sh deleted file mode 100755 index 7c2fbf511..000000000 --- a/scripts/install_dependencies.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) - -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" -source "${SOURCE}/dependency_versions.sh" - -Help() { - echo $(basename "$0")" Will install all datafed dependencies" - echo - echo "Syntax: "$(basename "$0")" [-h|a|w|c|r]" - echo "options:" - echo "-h, --help Print this help message" - echo "-a, --disable-arango-deps-install Don't install arango" - echo "-w, --disable-web-deps-install Don't install web deps" - echo "-c, --disable-core-deps-install Don't install core deps" - echo "-r, --disable-repo-deps-install Don't install repo deps" - echo "-z, --disable-authz-deps-install Don't install authz deps" -} - -local_INSTALL_ARANGO="TRUE" -local_INSTALL_WEB="TRUE" -local_INSTALL_CORE="TRUE" -local_INSTALL_REPO="TRUE" -local_INSTALL_AUTHZ="TRUE" - -VALID_ARGS=$(getopt -o hawcrz --long 'help',disable-arango-deps-install,disable-web-deps-install,disable-core-deps-install,disable-repo-debs-install,disable-authz-deps-install -- "$@") -if [[ $? -ne 0 ]]; then - exit 1 -fi -eval set -- "$VALID_ARGS" -while [ : ]; do - case "$1" in - -h | --help) - Help - exit 0 - ;; - -a | --disable-arango-deps-install) - local_INSTALL_ARANGO="FALSE" - shift - ;; - -w | --disable-web-deps-install) - local_INSTALL_WEB="FALSE" - shift - ;; - -c | --disable-core-deps-install) - local_INSTALL_CORE="FALSE" - shift - ;; - -r | --disable-repo-deps-install) - local_INSTALL_REPO="FALSE" - shift - ;; - -z | --disable-authz-deps-install) - local_INSTALL_AUTHZ="FALSE" - shift - ;; - --) - shift - break - ;; - \?) 
# incorrect option - echo "Error: Invalid option" - exit - ;; - esac -done - -sudo_command - -touch "$apt_file_path" -touch "$ext_file_path" -touch "$pip_file_path" - -# Defines SUDO_CMD which is empty if root -# sudo path if exists -# throws error otherwise - -"$SUDO_CMD" apt-get update -"$SUDO_CMD" apt install -y wget git curl - -# This script will install all of the dependencies needed by DataFed 1.0 -"$SUDO_CMD" dpkg --configure -a - -if [ "$local_INSTALL_CORE" == "TRUE" ]; then - "$SUDO_CMD" "$SOURCE/install_core_dependencies.sh" unify -fi -if [ "$local_INSTALL_REPO" == "TRUE" ]; then - "$SUDO_CMD" "$SOURCE/install_repo_dependencies.sh" unify -fi -if [ "$local_INSTALL_WEB" == "TRUE" ]; then - "$SUDO_CMD" "$SOURCE/install_ws_dependencies.sh" unify -fi -if [ "$local_INSTALL_AUTHZ" == "TRUE" ]; then - "$SUDO_CMD" "$SOURCE/install_authz_dependencies.sh" unify -fi -"$SUDO_CMD" "$SOURCE/install_docs_dependencies.sh" unify - -all_packages=$(cat "$apt_file_path") -IFS=' ' read -r -a all_packages_array <<<"$all_packages" -deduplicated_packages_array=($(printf "%s\n" "${all_packages_array[@]}" | sort -u)) -echo "DEPENDENCIES (${deduplicated_packages_array[@]})" -"$SUDO_CMD" apt-get install -y "${deduplicated_packages_array[@]}" - -all_pip_packages=$(cat "$pip_file_path") -IFS=' ' read -ra all_pip_packages_array <<<"$all_pip_packages" -if [ ${#all_pip_packages_array[@]} -gt 0 ]; then - echo "DEPENDENCIES (${all_pip_packages_array[@]})" - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install "${all_pip_packages_array[@]}" -fi - -all_externals=$(cat "$ext_file_path") -IFS=' ' read -r -a all_externals_array <<<"$all_externals" -# Deduplication must preserve order -deduplicated_externals_array=($(echo "${all_externals_array[@]}" | awk '{ for (i=1;i<=NF;i++) if (!seen[$i]++) printf("%s ", $i) }')) -echo "DEPENDENCIES (${deduplicated_externals_array[@]})" -for ext in "${deduplicated_externals_array[@]}"; do - echo "===== 
INSTALLING $ext ======" - install_dep_by_name "$ext" -done - -rm "$apt_file_path" -rm "$ext_file_path" -rm "$pip_file_path" - -if [ "$local_INSTALL_ARANGO" == "TRUE" ]; then - install_arangodb -fi diff --git a/scripts/install_docker_dependencies.sh b/scripts/install_docker_dependencies.sh deleted file mode 100755 index c8713d7d2..000000000 --- a/scripts/install_docker_dependencies.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -# This script will install all of the dependencies needed by DataFed 1.0 -sudo apt-get update -sudo dpkg --configure -a - -sudo apt-get install \ - ca-certificates \ - curl \ - gnupg \ - lsb-release - -sudo mkdir -p /etc/apt/keyrings -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - -echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null - -sudo apt-get update -sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin diff --git a/scripts/install_docs_dependencies.sh b/scripts/install_docs_dependencies.sh deleted file mode 100755 index ad27d83ce..000000000 --- a/scripts/install_docs_dependencies.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) 
- -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("g++" "gcc" "make" "pkg-config") -pip_packages=("setuptools" "sphinx" "sphinx-rtd-theme" "sphinx-autoapi") -externals=("cmake" "libopenssl" "python" "protobuf") - -local_UNIFY=false - -if [ $# -eq 1 ]; then - case "$1" in - -h | --help) - # If -h or --help is provided, print help - echo "Usage: $0 [-h|--help] [unify]" - ;; - unify) - # If 'unify' is provided, print the packages - # The extra space is necessary to not conflict with the other install scripts - echo -n "${packages[@]} " >>"$apt_file_path" - echo -n "${pip_packages[@]} " >>"$pip_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - local_UNIFY=true - ;; - *) - echo "Invalid Argument" - ;; - esac -fi - -if [[ $local_UNIFY = false ]]; then - sudo_command - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done - - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install --upgrade pip - "python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -fi diff --git a/scripts/install_end_to_end_test_dependencies.sh b/scripts/install_end_to_end_test_dependencies.sh deleted file mode 100755 index ff4d8f661..000000000 --- a/scripts/install_end_to_end_test_dependencies.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) 
- -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("libtool" "build-essential" "g++" "gcc" "make" "libboost-all-dev" "pkg-config" "autoconf" "automake" "unzip" "libcurl4-openssl-dev" "wget" - "rapidjson-dev" "libkrb5-dev" "git" "libssl-dev") - -pip_packages=("setuptools") -# NOTE the order matters here -externals=("cmake" "libopenssl" "python" "protobuf" "nvm" "node" "foxx") - -local_UNIFY=false - -if [ $# -eq 1 ]; then - case "$1" in - -h | --help) - # If -h or --help is provided, print help - echo "Usage: $0 [-h|--help] [unify]" - ;; - unify) - # If 'unify' is provided, print the packages - # The extra space is necessary to not conflict with the other install scripts - echo -n "${packages[@]} " >>"$apt_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - echo -n "${pip_packages[@]} " >>"$pip_file_path" - local_UNIFY=true - ;; - *) - echo "Invalid Argument" - ;; - esac -fi - -if [[ $local_UNIFY = false ]]; then - sudo_command - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done - - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install --upgrade pip - "python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -fi diff --git a/scripts/install_foxx.sh b/scripts/install_foxx.sh index 92064a0be..45c072abe 100755 --- a/scripts/install_foxx.sh +++ b/scripts/install_foxx.sh @@ -15,10 +15,10 @@ set -ef -o pipefail SCRIPT=$(realpath "$0") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) -source "${PROJECT_ROOT}/config/datafed.sh" -source "${SOURCE}/dependency_versions.sh" -source "${SOURCE}/dependency_install_functions.sh" +DATAFED_PROJECT_ROOT=$(realpath ${SOURCE}/..) 
+source "${DATAFED_PROJECT_ROOT}/config/datafed.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" +source "${DATAFED_PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_install_functions.sh" Help() { echo "$(basename $0) Will set up a configuration file for the core server" @@ -37,7 +37,6 @@ Help() { echo " database, the env variable: " echo " DATAFED_DATABASE_HOST can also be " echo " used." - echo "-y, --system-secret ZeroMQ system secret" echo echo "NOTE: Do not run this script with sudo!" echo @@ -49,16 +48,32 @@ Help() { local_DATABASE_API_SCHEME="${DATABASE_API_SCHEME:-http}" local_SSL_CERT_FILE="${SSL_CERT_FILE:-}" local_ARANGOSH_SERVER_ENDPOINT_SCHEME="tcp" +local_CURL_SSL_ARG="" +NODE_EXTRA_CA_CERTS="" +if [[ ! -z "${local_SSL_CERT_FILE}" ]]; then + if [ -f "${local_SSL_CERT_FILE}" ]; then + echo "" + echo "INFO - Found ssl certificates for arango! Building with https." + echo "${local_SSL_CERT_FILE}" + echo "" + # Only run if defined and not empty, API_SCHEME must be https in this case + local_DATABASE_API_SCHEME="https" + local_CURL_SSL_ARG="--cacert $local_SSL_CERT_FILE" + # So foxx will trust the certificate + export NODE_EXTRA_CA_CERTS="$local_SSL_CERT_FILE" + else + echo "" + echo "WARNING - SSL_CERT_FILE is defined but the file does not exist! Assuming," + echo " communication with the database will be over http not https." 
+ echo "$local_SSL_CERT_FILE" + echo "" + fi +fi local_DATABASE_NAME="sdms" local_DATABASE_USER="root" local_DATABASE_PORT="8529" -if [ -f "${local_SSL_CERT_FILE}" ]; then - ssl_args="--ssl.cafile ${local_SSL_CERT_FILE}" - export NODE_EXTRA_CA_CERTS="${local_SSL_CERT_FILE}" -fi - if [ -z "${DATAFED_DATABASE_PASSWORD}" ]; then local_DATAFED_DATABASE_PASSWORD="" else @@ -66,7 +81,7 @@ else fi if [ -z "${FOXX_MAJOR_API_VERSION}" ]; then - local_FOXX_MAJOR_API_VERSION=$(cat ${PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) + local_FOXX_MAJOR_API_VERSION=$(cat ${DATAFED_PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) else local_FOXX_MAJOR_API_VERSION=$(printenv FOXX_MAJOR_API_VERSION) fi @@ -77,7 +92,7 @@ else local_DATAFED_DATABASE_HOST=$(printenv DATAFED_DATABASE_HOST) fi -VALID_ARGS=$(getopt -o hu:p:f:i:y: --long 'help',database-user:,database-password:,foxx-api-major-version:,database-host:,zeromq-system-secret: -- "$@") +VALID_ARGS=$(getopt -o hu:p:f:i: --long 'help',database-user:,database-password:,foxx-api-major-version:,database-host: -- "$@") if [[ $? -ne 0 ]]; then exit 1 fi @@ -89,22 +104,18 @@ while [ : ]; do exit 0 ;; -u | --database-user) - echo "Processing 'Database user' option. Input argument is '$2'" local_DATABASE_USER=$2 shift 2 ;; -p | --database-password) - echo "Processing 'Database password' option. Input argument is '$2'" local_DATAFED_DATABASE_PASSWORD=$2 shift 2 ;; -f | --foxx-api-major-version) - echo "Processing 'Foxx major api version' option. Input argument is '$2'" local_FOXX_MAJOR_API_VERSION=$2 shift 2 ;; -i | --database-host) - echo "Processing 'database host' option. Input argument is '$2'" local_DATAFED_DATABASE_HOST=$2 shift 2 ;; @@ -113,7 +124,7 @@ while [ : ]; do break ;; \?) 
# incorrect option - echo "Error: Invalid option" + echo "ERROR - Invalid option" exit ;; esac @@ -121,9 +132,9 @@ done ERROR_DETECTED=0 if [ -z "$local_DATAFED_DATABASE_PASSWORD" ]; then - echo "Error DATAFED_DATABASE_PASSWORD is not defined, this is a required argument" - echo " This variable can be set using the command line option -p, --database-password" - echo " or with the environment variable DATAFED_DATABASE_PASSWORD." + echo "ERROR - DATAFED_DATABASE_PASSWORD is not defined, this is a required argument" + echo " This variable can be set using the command line option -p, --database-password" + echo " or with the environment variable DATAFED_DATABASE_PASSWORD." ERROR_DETECTED=1 fi @@ -135,12 +146,12 @@ basic_auth="$local_DATABASE_USER:$local_DATAFED_DATABASE_PASSWORD" if [ "${local_DATABASE_API_SCHEME}" == "https" ]; then set +e - output=$(curl --user "$basic_auth" ${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT} 2>&1) + output=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl ${local_CURL_SSL_ARG} --user "$basic_auth" ${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT} 2>&1) error_code="$?" set -e if [ "$error_code" == "60" ]; then - echo "Error detected, untrusted certificate." 
+ echo "ERROR - Untrusted certificate detected of address ${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" echo "$output" exit 1 fi @@ -150,47 +161,67 @@ fi url="${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}/_api/database/user" # Do not output to /dev/null we need the output -code=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl -s -o /dev/null -w "%{http_code}" --user "$basic_auth" "$url") -if [[ "$code" != "200" ]]; then - echo "Error detected in attempting to connect to database at $url" - echo "HTTP code is: $code" - exit 1 -fi +max_retries=3 +retry_delay=5 + +for attempt in $(seq 1 $max_retries); do + set +e + code=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" \ + curl ${local_CURL_SSL_ARG} -s -o /dev/null -w "%{http_code}" \ + --user "$basic_auth" "$url") + error_code=$? + set -e + + if [[ "$error_code" -eq 0 && "$code" -eq 200 ]]; then + echo "INFO - Attempting to connect to database at $url" + echo " HTTP code is: $code" + echo " Succeeded" + break + fi + + if [[ "$attempt" -lt "$max_retries" ]]; then + echo "WARNING - Attempting to connect to database at $url" + echo " HTTP code is: $code" + echo " Failed" + echo " Retrying in ${retry_delay}s..." 
+ sleep "$retry_delay" + else + echo "ERROR - Attempting to connect to database at $url" + echo " HTTP code is: $code" + echo " Failed after $max_retries" + exit 1 + fi +done url2="${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}/_api/database" # We are now going to initialize the DataFed database in Arango, but only if sdms database does # not exist -output=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl -s -i --user "$basic_auth" "$url2") - -echo "Output: $output" +output=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl ${local_CURL_SSL_ARG} -s -i --user "$basic_auth" "$url2") if [[ "$output" == "" ]]; then - echo "curl command failed $url2 exiting" + echo "ERROR - curl command failed $url2 exiting" exit 1 fi if [[ "$output" =~ .*"sdms".* ]]; then - echo "SDMS already exists do nothing" + echo "INFO - SDMS already exists do nothing." else - echo "Creating SDMS" + echo "INFO - Creating SDMS" arangosh --server.endpoint \ "${local_ARANGOSH_SERVER_ENDPOINT_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" \ --server.password "${local_DATAFED_DATABASE_PASSWORD}" \ --server.username "${local_DATABASE_USER}" \ - "${ssl_args}" \ - --javascript.execute "${PROJECT_ROOT}/core/database/foxx/db_create.js" + --javascript.execute "${DATAFED_PROJECT_ROOT}/core/database/foxx/db_create.js" # Give time for the database to be created sleep 2 arangosh --server.endpoint "${local_ARANGOSH_SERVER_ENDPOINT_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" \ --server.password "${local_DATAFED_DATABASE_PASSWORD}" \ --server.username "${local_DATABASE_USER}" \ - "${ssl_args}" \ --javascript.execute-string 'db._useDatabase("sdms"); db.config.insert({"_key": "msg_daily", "msg" : "DataFed servers will be off-line for regular maintenance every Sunday night from 11:45 pm until 12:15 am EST Monday morning."}, {overwrite: true});' arangosh --server.endpoint 
"${local_ARANGOSH_SERVER_ENDPOINT_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" \ --server.password "${local_DATAFED_DATABASE_PASSWORD}" \ --server.username "${local_DATABASE_USER}" \ - "${ssl_args}" \ --javascript.execute-string "db._useDatabase(\"sdms\"); db.config.insert({ \"_key\": \"system\", \"_id\": \"config/system\"}, {overwrite: true } );" fi @@ -203,11 +234,11 @@ fi # syntax for the REST http endpoints with curl so we are going to try the node module # 1. Install nvm which will allow us to update node -echo "Installing nvm" +echo "INFO - Installing nvm" install_nvm -echo "Installing node" +echo "INFO - Installing node" install_node -echo "Installing foxx_cli" +echo "INFO - Installing foxx_cli" install_foxx_cli FOXX_PREFIX="" @@ -218,7 +249,6 @@ fi echo "${local_DATAFED_DATABASE_PASSWORD}" >"${SOURCE}/database_temp.password" PATH_TO_PASSWD_FILE="${SOURCE}/database_temp.password" -echo "Path to PASSWRD file ${PATH_TO_PASSWD_FILE} passwd is $local_DATAFED_DATABASE_PASSWORD" echo "$local_DATAFED_DATABASE_PASSWORD" >"${PATH_TO_PASSWD_FILE}" { # try @@ -234,14 +264,13 @@ echo "$local_DATAFED_DATABASE_PASSWORD" >"${PATH_TO_PASSWD_FILE}" INSTALL_API="FALSE" FOUND_API=$(echo "$existing_services" | grep "/api/${local_FOXX_MAJOR_API_VERSION}") - echo "$FOUND_API" - - RESULT=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl -s ${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}/_db/sdms/api/${local_FOXX_MAJOR_API_VERSION}/version) + RESULT=$(LD_LIBRARY_PATH="${DATAFED_DEPENDENCIES_INSTALL_PATH}:$LD_LIBRARY_PATH" curl ${local_CURL_SSL_ARG} -s ${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}/_db/sdms/api/${local_FOXX_MAJOR_API_VERSION}/version) CODE=$(echo "${RESULT}" | jq '.code') - echo "Code is $CODE" if [ -z "${FOUND_API}" ]; then + echo "INFO - API found at 
${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}/_db/sdms/api/${local_FOXX_MAJOR_API_VERSION}/version" INSTALL_API="TRUE" elif [ "$CODE" == "503" ]; then + echo "WARNING - $CODE returned, attempting to remove api at /api/${local_FOXX_MAJOR_API_VERSION}" INSTALL_API="TRUE" # Remove the api at this point # WARNING Foxx and arangosh arguments differ --server is used for Foxx not --server.endpoint @@ -253,8 +282,8 @@ echo "$local_DATAFED_DATABASE_PASSWORD" >"${PATH_TO_PASSWD_FILE}" --database "${local_DATABASE_NAME}" fi - echo "$RESULT" if [ "${INSTALL_API}" == "TRUE" ]; then + echo "INFO - Installing arango foxx services at /api/${local_FOXX_MAJOR_API_VERSION}" # WARNING Foxx and arangosh arguments differ --server is used for Foxx not --server.endpoint "${FOXX_PREFIX}foxx" install \ --server "${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" \ @@ -262,19 +291,19 @@ echo "$local_DATAFED_DATABASE_PASSWORD" >"${PATH_TO_PASSWD_FILE}" -p "${PATH_TO_PASSWD_FILE}" \ --database "${local_DATABASE_NAME}" \ "/api/${local_FOXX_MAJOR_API_VERSION}" \ - "${PROJECT_ROOT}/core/database/foxx/" + "${DATAFED_PROJECT_ROOT}/core/database/foxx/" else - echo "DataFed Foxx Services have already been uploaded, replacing to ensure consisency" + echo "INFO - Replacing arango foxx services at /api/${local_FOXX_MAJOR_API_VERSION}" # WARNING Foxx and arangosh arguments differ --server is used for Foxx not --server.endpoint "${FOXX_PREFIX}foxx" replace \ --server "${local_DATABASE_API_SCHEME}://${local_DATAFED_DATABASE_HOST}:${local_DATABASE_PORT}" \ -u "${local_DATABASE_USER}" \ -p "${PATH_TO_PASSWD_FILE}" \ --database "${local_DATABASE_NAME}" \ - "/api/${local_FOXX_MAJOR_API_VERSION}" "${PROJECT_ROOT}/core/database/foxx/" - echo "foxx replace -u ${local_DATABASE_USER} -p ${PATH_TO_PASSWD_FILE} --database ${local_DATABASE_NAME} /api/${local_FOXX_MAJOR_API_VERSION} ${PROJECT_ROOT}/core/database/foxx" + 
"/api/${local_FOXX_MAJOR_API_VERSION}" "${DATAFED_PROJECT_ROOT}/core/database/foxx/" fi rm "${PATH_TO_PASSWD_FILE}" } || { # catch + echo "ERROR - Unexpected error encountered!" rm "${PATH_TO_PASSWD_FILE}" } diff --git a/scripts/install_gcs.sh b/scripts/install_gcs.sh deleted file mode 100755 index dea94d8c7..000000000 --- a/scripts/install_gcs.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -euf -o pipefail - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/..") -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -install_gcs diff --git a/scripts/install_lego_and_certificates.sh b/scripts/install_lego_and_certificates.sh index ed9b28fb3..44ed61c8d 100755 --- a/scripts/install_lego_and_certificates.sh +++ b/scripts/install_lego_and_certificates.sh @@ -6,7 +6,7 @@ SCRIPT=$(realpath "$0") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/..") source "${PROJECT_ROOT}/config/datafed.sh" -source "${PROJECT_ROOT}/scripts/utils.sh" +source "${PROJECT_ROOT}/external/DataFedDependencies/scripts/utils.sh" Help() { echo "$(basename $0) Will install lego and use Let's Encrypt to create certificates." 
diff --git a/scripts/install_python_client_dependencies.sh b/scripts/install_python_client_dependencies.sh deleted file mode 100755 index 0d2c2cd3c..000000000 --- a/scripts/install_python_client_dependencies.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/..") - -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -sudo_command - -# This script will install all of the dependencies needed by DataFed 1.0 -"$SUDO_CMD" apt-get update -"$SUDO_CMD" dpkg --configure -a -"$SUDO_CMD" apt-get install -y libtool build-essential g++ gcc make libboost-all-dev \ - pkg-config autoconf automake unzip libcurl4-openssl-dev wget \ - rapidjson-dev libkrb5-dev git libssl-dev - -cd ~ -install_python -install_cmake -cd ~ - -# Install cmake 3.17 - -init_python -source "${DATAFED_PYTHON_ENV}/bin/activate" -"python${DATAFED_PYTHON_VERSION}" -m pip install --upgrade pip -"python${DATAFED_PYTHON_VERSION}" -m pip install setuptools - -install_protobuf -cd ~ diff --git a/scripts/install_repo_dependencies.sh b/scripts/install_repo_dependencies.sh deleted file mode 100755 index e14acff4b..000000000 --- a/scripts/install_repo_dependencies.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/..) 
- -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("libtool" "wget" "build-essential" "g++" "gcc" "libboost-all-dev" "pkg-config" "autoconf" "automake" "make" "unzip" "git") -pip_packages=("setuptools") -externals=("cmake" "libopenssl" "python" "protobuf" "libsodium" "libzmq") - -local_UNIFY=false - -if [ $# -eq 1 ]; then - case "$1" in - -h | --help) - # If -h or --help is provided, print help - echo "Usage: $0 [-h|--help] [unify]" - ;; - unify) - # If 'unify' is provided, print the packages - # The extra space is necessary to not conflict with the other install scripts - echo -n "${packages[@]} " >>"$apt_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - echo -n "${pip_packages[@]} " >>"$pip_file_path" - local_UNIFY=true - ;; - *) - # If any other argument is provided, install the packages - echo "Invalid Argument" - ;; - esac -fi - -sudo_command - -if [[ $local_UNIFY = false ]]; then - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done - - init_python - source "${DATAFED_PYTHON_ENV}/bin/activate" - "python${DATAFED_PYTHON_VERSION}" -m pip install --upgrade pip - "python${DATAFED_PYTHON_VERSION}" -m pip install "${pip_packages[@]}" -fi diff --git a/scripts/install_ws.sh b/scripts/install_ws.sh index 247e26bb2..f5a7750fc 100755 --- a/scripts/install_ws.sh +++ b/scripts/install_ws.sh @@ -6,8 +6,8 @@ SCRIPT=$(realpath "$0") SOURCE=$(dirname "$SCRIPT") PROJECT_ROOT=$(realpath "${SOURCE}/..") source "${PROJECT_ROOT}/config/datafed.sh" -source "${SOURCE}/dependency_versions.sh" -source "${SOURCE}/dependency_install_functions.sh" +source "${PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_versions.sh" +source "${PROJECT_ROOT}/external/DataFedDependencies/scripts/dependency_install_functions.sh" #NVM_DIR=/home/cades/.nvm #[ -s "$NVM_DIR/nvm.sh" ] 
&& \. "$NVM_DIR/nvm.sh" diff --git a/scripts/install_ws_dependencies.sh b/scripts/install_ws_dependencies.sh deleted file mode 100755 index 3c0d2ae77..000000000 --- a/scripts/install_ws_dependencies.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash - -# Exit on error -set -e - -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath "${SOURCE}/..") - -source "${PROJECT_ROOT}/scripts/utils.sh" -source "${SOURCE}/dependency_versions.sh" -source "${PROJECT_ROOT}/scripts/dependency_install_functions.sh" - -packages=("curl" "g++" "make" "wget") -externals=("cmake" "libopenssl" "python" "nvm" "node" "ws_node_packages") - -Help() { - echo "$(basename $0) install web dependencies." - echo - echo "Syntax: $(basename $0) [-h|n]" - echo "options:" - echo "-h, --help Print this help message" - echo "-n, --node_install_dir Install directory, defaults to" - echo " whatever is defined in the datafed.sh file" - echo " DATAFED_DEPENDENCIES_INSTALL_PATH" - echo " ${DATAFED_DEPENDENCIES_INSTALL_PATH}" - echo "-u, --unify Unifies install scripts to be used in docker builds" -} - -# Equivalent to the .nvm directory -local_NODE_INSTALL="$DATAFED_DEPENDENCIES_INSTALL_PATH" -local_UNIFY=false - -VALID_ARGS=$(getopt -o hn: --long 'help',node_install_dir: -- "$@") -if [[ $? -ne 0 ]]; then - exit 1 -fi -eval set -- "$VALID_ARGS" -while [ : ]; do - case "$1" in - -h | --help) - Help - exit 0 - ;; - -n | --node_install_dir) - local_NODE_INSTALL=$2 - shift 2 - ;; - unify) - # The extra space is necessary to not conflict with the other install scripts - echo -n "${packages[@]} " >>"$apt_file_path" - echo -n "${externals[@]} " >>"$ext_file_path" - local_UNIFY=true - shift - ;; - --) - shift - break - ;; - \?) 
# incorrect option - echo "Error: Invalid option" - exit - ;; - esac -done - -sudo_command - -if [[ $local_UNIFY = false ]]; then - "$SUDO_CMD" apt-get update - "$SUDO_CMD" dpkg --configure -a - "$SUDO_CMD" apt-get install -y "${packages[@]}" - - for ext in "${externals[@]}"; do - install_dep_by_name "$ext" - done -fi diff --git a/scripts/utils.sh b/scripts/utils.sh deleted file mode 100755 index 61d273c24..000000000 --- a/scripts/utils.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -SCRIPT=$(realpath "$0") -SOURCE=$(dirname "$SCRIPT") - -if [ -z "${PROJECT_ROOT}" ]; then - PROJECT_ROOT=$(realpath "${SOURCE}/..") -fi - -echo "PROJECT ROOT $PROJECT_ROOT" - -export_dependency_version_numbers() { - # Get the content of the function and remove comments - variables=$(cat "${PROJECT_ROOT}/scripts/dependency_versions.sh") - - local content="$(echo "${variables}" | sed '/^$/d;/^#/d')" - - # Extract variable assignments from the content - local assignments=$(echo "$content" | grep -Eo '\b[a-zA-Z_][a-zA-Z_0-9]*="[^\"]*"') - - echo "Variables are $variables" - echo "Content is $content" - echo "Assignments is $assignments" - # Loop through each assignment, export the variable - # Note: This may override existing variables - for assignment in $assignments; do - echo "export $assignment" - export "$assignment" - done -} - -empty_command() { - "$@" -} - -# The purpose of this function is to detect the sudo command -# if it exists use it, if we are running as root set SUDO_CMD to empty_command -# empty_command is needed so that I can do this where sudo doesn't exist -# -# "$SUDO_CMD" apt install curl -# -# If running as root this will expand to -# -# empty_command apt install curl -# -# which expands to -# -# apt install curl -# -# If I left SUDO_CMD blank i.e. 
"" apt install curl bash would complain -sudo_command() { - if [ "$(id -u)" -eq 0 ]; then - export SUDO_CMD="empty_command" # Ignore sudo running as root - else - # Check if sudo is available - if command -v sudo &>/dev/null; then - export SUDO_CMD=$(command -v sudo) - return 0 - else - echo "Error: This script requires sudo but sudo is not installed." >&2 - echo "You are not running as root!" >&2 - exit 1 - fi - exit $? # Exit with the same status as the sudo command - fi -} - -# Only recognized x.x.x format where all "x" are integers -# Returns true if first version is greater or equal to second version -# -# semantic_version_compatible "1.2.3" "1.1.8" -# echo $? -# Should print 1 -# -# semantic_version_compatible "1.2.3" "1.2.8" -# echo $? -# Should print 0 -# -#semantic_version_compatible "1.1.1" "1.1.1" -#echo "Should return true 1.1.1 >= 1.1.1" -# -#semantic_version_compatible "1.2.1" "1.1.1" -#echo "Should return true 1.2.1 >= 1.1.1" -# -#semantic_version_compatible "1.2.1" "3.1.1" -#echo "Should return false 1.2.1 >= 3.1.1" -# -#semantic_version_compatible "v1.2.1" "v3.1.1" -#echo "Should return false v1.2.1 >= v3.1.1" -# -#semantic_version_compatible "v1.2.1" "1.1.1" -#echo "Should return true v1.2.1 >= 1.1.1" - -semantic_version_compatible() { - local VER1="$1" - local VER2="$2" - - # Remove any preceding v from version i.e. 
v1.1.2 - VER1=$(echo "$VER1" | sed 's/v//g') - VER2=$(echo "$VER2" | sed 's/v//g') - - maj_1=$(echo "$VER1" | sed 's/\./ /g' | awk '{print $1}') - min_1=$(echo "$VER1" | sed 's/\./ /g' | awk '{print $2}') - patch_1=$(echo "$VER1" | sed 's/\./ /g' | awk '{print $3}') - maj_2=$(echo "$VER2" | sed 's/\./ /g' | awk '{print $1}') - min_2=$(echo "$VER2" | sed 's/\./ /g' | awk '{print $2}') - patch_2=$(echo "$VER2" | sed 's/\./ /g' | awk '{print $3}') - - if [ "$maj_1" -gt "$maj_2" ]; then - return 1 - elif [ "$maj_1" -lt "$maj_2" ]; then - return 0 - fi - - if [ "$min_1" -gt "$min_2" ]; then - return 1 - elif [ "$min_1" -lt "$min_2" ]; then - return 0 - fi - - if [ "$patch_1" -gt "$patch_2" ]; then - return 1 - elif [ "$patch_1" -lt "$patch_2" ]; then - return 0 - fi - return 1 -} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt new file mode 100644 index 000000000..1c6fdc2b6 --- /dev/null +++ b/tests/CMakeLists.txt @@ -0,0 +1,55 @@ + +if( ENABLE_INTEGRATION_TESTS ) + if( BUILD_COMMON ) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/mock/mock_start.sh + ${CMAKE_CURRENT_BINARY_DIR}/mock/mock_start.sh + COPYONLY) + + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/mock/mock_stop.sh + ${CMAKE_CURRENT_BINARY_DIR}/mock/mock_stop.sh + COPYONLY) + + add_test(NAME start_mock + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/mock/mock_start.sh + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/tests/mock_core + ) + + set(DATAFED_MOCK_CORE_PUB_KEY "${CMAKE_BINARY_DIR}/tests/mock_core/mock-datafed-core-key.pub" PARENT_SCOPE) + + # Add a custom target to stop the server + add_test(NAME stop_mock + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/mock/mock_stop.sh + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/tests/mock_core + ) + + set_tests_properties(start_mock PROPERTIES FIXTURES_SETUP FIX_MOCK) + set_tests_properties(stop_mock PROPERTIES FIXTURES_CLEANUP FIX_MOCK) + + add_subdirectory(mock_core) + add_subdirectory(mock) + else() + message(WARNING "Integration tests are enabled but build common is disabled, if you wish 
to run the full suite of integration tests you will need to enable building the common library.") + endif() +endif() + +if (ENABLE_END_TO_END_TESTS) + if (NOT ENABLE_END_TO_END_API_TESTS AND NOT ENABLE_END_TO_END_WEB_TESTS) + message (FATAL_ERROR "Cannot run end-to-end tests because one of the ENABLE_END_TO_END web or API flag is not on") + else() + add_subdirectory(end-to-end) + add_subdirectory(end-to-end/web-UI) + endif() +else() + if (ENABLE_END_TO_END_API_TESTS) + if (BUILD_PYTHON_CLIENT) + add_subdirectory(end-to-end) + else() + message (FATAL_ERROR "Cannot run end-to-end tests because BUILD_PYTHON_CLIENT is not on") + endif() + endif() + if (ENABLE_END_TO_END_WEB_TESTS) + add_subdirectory(end-to-end/web-UI) + endif() +endif() + + diff --git a/tests/end-to-end/setup.sh b/tests/end-to-end/setup.sh index 1ed7d0bdc..c117ec984 100755 --- a/tests/end-to-end/setup.sh +++ b/tests/end-to-end/setup.sh @@ -92,13 +92,13 @@ if [ -z "${DATAFED_USER99_GLOBUS_UUID}" ]; then exit 1 fi -SCRIPT=$(realpath "$0") +SCRIPT=$(realpath "${BASH_SOURCE[0]}") SOURCE=$(dirname "$SCRIPT") -PROJECT_ROOT=$(realpath ${SOURCE}/../../) -source ${PROJECT_ROOT}/config/datafed.sh +DATAFED_PROJECT_ROOT=$(realpath ${SOURCE}/../../) +source ${DATAFED_PROJECT_ROOT}/config/datafed.sh if [ -z "${FOXX_MAJOR_API_VERSION}" ]; then - local_FOXX_MAJOR_API_VERSION=$(cat ${PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) + local_FOXX_MAJOR_API_VERSION=$(cat ${DATAFED_PROJECT_ROOT}/cmake/Version.cmake | grep -o -P "(?<=FOXX_API_MAJOR).*(?=\))" | xargs) else local_FOXX_MAJOR_API_VERSION=$(printenv FOXX_MAJOR_API_VERSION) fi @@ -119,11 +119,11 @@ fi # First step is to clear the database echo "Clearing old database" -${PROJECT_ROOT}/scripts/clear_db.sh +${DATAFED_PROJECT_ROOT}/scripts/clear_db.sh # Second install foxx echo "Installing foxx services and API" -${PROJECT_ROOT}/scripts/install_foxx.sh +${DATAFED_PROJECT_ROOT}/scripts/install_foxx.sh echo "Completed" if [ -z 
"${DATAFED_DATABASE_HOST}" ]; then diff --git a/tests/end-to-end/web-UI/auth.setup.js.in b/tests/end-to-end/web-UI/auth.setup.js.in index 2e50973f5..9d898dacc 100644 --- a/tests/end-to-end/web-UI/auth.setup.js.in +++ b/tests/end-to-end/web-UI/auth.setup.js.in @@ -2,50 +2,68 @@ const { chromium } = require('playwright'); const path = require('path'); const process = require('process'); -console.log("******Inside Setup file******"); +console.log("INFO - ******Inside Setup file******"); module.exports = async function () { // if a playwright page object doesn't exist, create one const browser = await chromium.launch({ args: ['--ignore-certificate-errors'], - timeout: 30000, + headless: true, }); - // const context = await browser.newContext({ - // ignoreHTTPSErrors: true, - // }); const page = await browser.newPage(); - console.log("new page object created"); - - // Go to the website and login through globus using a tester account - await page.goto('https://@DATAFED_DOMAIN@/ui/welcome');//TESTING - if (await page.getByRole('button', { name: 'Log In / Register' }).isVisible()) { - await page.getByRole('button', { name: 'Log In / Register' }).click(); - if (page.getByRole('link', { name: 'Globus globus' }).isVisible()) { - page.getByRole('button', { name: 'Globus ID to sign in' }).click(); - if (page.getByLabel('Username @globusid.org').isEditable()) { - // changes the username and password in the .env file if needed - await page.getByLabel('Username @globusid.org').fill(process.env.DATAFED_WEB_TEST_USERNAME); - await page.getByLabel('Password').fill(process.env.DATAFED_WEB_TEST_PASSWORD); - await page.click('button[type="submit"]'); - await page.waitForURL('https://@DATAFED_DOMAIN@/ui/main') - console.log("******PAST LOGIN******"); - await page.context().storageState({ path: './.auth/auth.json'}); //TESTING - console.log("******Done with login******"); - } else { - console.log("DID NOT SEE FORM"); - } - } else { - console.log("DID NOT SEE GLOBUS BUTTON"); - } - } else 
{ - console.log("DID NOT SEE LOGIN BUTTON"); - } + console.log("INFO - new page object created"); + + // Extra safety for debugging slow loads + page.on('console', msg => console.log('INFO - [PAGE LOG] ', msg.text())); + page.on('response', res => { + if (res.status() >= 400) + console.log(`ERROR - [HTTP ${res.status()}] ${res.url()}`); + }); + + // --- Step 1: Go to DataFed --- + console.log("INFO - 1. Got to DataFed"); + await page.goto('https://@DATAFED_DOMAIN@/ui/welcome', { waitUntil: 'networkidle', timeout: 60000 }); + + // --- Step 2: Click Login/Register --- + console.log("INFO - 2. Login and Register"); + const loginButton = page.getByRole('button', { name: 'Log In / Register' }); + await loginButton.waitFor({ state: 'visible', timeout: 30000 }); + await loginButton.click(); + + // --- Step 3: Check if link is visible --- + console.log("INFO - 3. Check if link is visible"); + const globusLink = page.getByRole('link', { name: /Globus/i }); + await globusLink.waitFor({ state: 'visible', timeout: 30000 }); + + // --- Step 4: Click Globus ID to sign in --- + console.log("INFO - 4. Use Globus ID to sign in"); + const globusIDButton = page.getByRole('button', { name: 'Globus ID to sign in' }); + await globusIDButton.waitFor({ state: 'visible', timeout: 30000 }); + await globusIDButton.click(); + + // --- Step 5: Wait for Globus redirect --- + console.log("INFO - 5. Wait for Globus login to redirect"); + await page.waitForLoadState('networkidle', { timeout: 45000 }); + + // --- Step 6: Fill in credentials robustly --- + console.log("INFO - 6. 
Fill in credentials"); + const usernameField = page.getByLabel(/username/i); + await usernameField.waitFor({ state: 'visible', timeout: 45000 }); + await usernameField.fill(process.env.DATAFED_WEB_TEST_USERNAME); + + const passwordField = page.getByLabel(/password/i); + await passwordField.waitFor({ state: 'visible', timeout: 45000 }); + await passwordField.fill(process.env.DATAFED_WEB_TEST_PASSWORD); + + // --- Step 7: Submit form --- + await page.click('button[type="submit"]'); + await page.waitForURL('https://@DATAFED_DOMAIN@/ui/main') + + console.log("INFO - ******PAST LOGIN******"); + await page.context().storageState({ path: './.auth/auth.json'}); //TESTING + console.log("INFO - ******Done with login******"); + await browser.close(); - //return page; // pass on the page variable if using this function directly in a test script }; -// TODO this is not as efficient as storing the states in the context variable, -// something like "page.context().storagestate" would work better in the future when -// there are many more test files - diff --git a/tests/end-to-end/web-UI/playwright.config.js b/tests/end-to-end/web-UI/playwright.config.js index 1e83946f1..cae11dd50 100644 --- a/tests/end-to-end/web-UI/playwright.config.js +++ b/tests/end-to-end/web-UI/playwright.config.js @@ -15,13 +15,13 @@ module.exports = defineConfig({ /* Run tests in files in parallel */ fullyParallel: false, - /* Fail the build on CI if you accidentally left test.only in the source code. */ - forbidOnly: !!process.env.CI, + /* Fail the build on CI_DATAFED_END_TO_END_WEB_RETRIES if you accidentally left test.only in the source code. */ + forbidOnly: !!process.env.CI_DATAFED_END_TO_END_WEB_RETRIES, - /* Retry on CI only */ - retries: process.env.CI ? 2 : 0, + /* Retry on CI_DATAFED_END_TO_END_WEB_RETRIES only */ + retries: process.env.CI_DATAFED_END_TO_END_WEB_RETRIES ? 2 : 0, - /* Opt out of parallel tests on CI. */ + /* Opt out of parallel tests on CI_DATAFED_END_TO_END_WEB_RETRIES. 
*/ workers: 1, /* Reporter to use. See https://playwright.dev/docs/test-reporters */ @@ -31,9 +31,11 @@ module.exports = defineConfig({ use: { /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */ trace: "on-first-retry", - + screenshot: "only-on-failure", + video: "retain-on-failure", ignoreHTTPSErrors: true, - + actionTimeout: 45000, + navigationTimeout: 60000, launchOptions: { args: ["--ignore-certificate-errors"], }, diff --git a/tests/mock/CMakeLists.txt b/tests/mock/CMakeLists.txt new file mode 100644 index 000000000..0e8b7eeb0 --- /dev/null +++ b/tests/mock/CMakeLists.txt @@ -0,0 +1,24 @@ + + +# Each test listed in Alphabetical order +foreach(PROG + test_getVersion +) + + file(GLOB ${PROG}_SOURCES ${PROG}*.cpp) + add_executable(mock_liveness_${PROG} ${${PROG}_SOURCES}) + + target_link_libraries(mock_liveness_${PROG} PUBLIC ${DATAFED_BOOST_LIBRARIES} ${DATAFED_GSSAPI_LIBRARIES} ${DATAFED_GLOBUS_COMMON_LIBRARIES} common ) + if(BUILD_SHARED_LIBS) + target_compile_definitions(mock_liveness_${PROG} PRIVATE BOOST_TEST_DYN_LINK) + endif() + + # Run the test from the same folder as the mock server so that the + # public key is visble + add_test(NAME mock_liveness_${PROG} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/mock_liveness_${PROG} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/tests/mock_core + ) + set_tests_properties(mock_liveness_${PROG} PROPERTIES FIXTURES_REQUIRED FIX_MOCK) + +endforeach(PROG) diff --git a/tests/mock/mock_start.sh b/tests/mock/mock_start.sh new file mode 100755 index 000000000..4828d0830 --- /dev/null +++ b/tests/mock/mock_start.sh @@ -0,0 +1,10 @@ +#!/bin/bash +SCRIPT=$(realpath "${BASH_SOURCE[0]}") +SOURCE=$(dirname "$SCRIPT") + +if [ -f ./mock.log ]; then + rm ./mock.log +fi +"$SOURCE/../mock_core/datafed-mock-core" --gen-keys +"$SOURCE/../mock_core/datafed-mock-core" >mock.log 2>&1 & +sleep 2 diff --git a/repository/gridftp/globus5/authz/tests/mock/mock_stop.sh b/tests/mock/mock_stop.sh similarity index 60% 
rename from repository/gridftp/globus5/authz/tests/mock/mock_stop.sh rename to tests/mock/mock_stop.sh index 4755e1e89..e548daa9a 100755 --- a/repository/gridftp/globus5/authz/tests/mock/mock_stop.sh +++ b/tests/mock/mock_stop.sh @@ -1,4 +1,7 @@ #!/bin/bash +SCRIPT=$(realpath "${BASH_SOURCE[0]}") +SOURCE=$(dirname "$SCRIPT") + echo "Stopping Mock Server" kill $(cat ./server.pid) && rm -f ./server.pid cat ./mock.log diff --git a/repository/gridftp/globus5/authz/tests/mock/test_getVersion.cpp b/tests/mock/test_getVersion.cpp similarity index 93% rename from repository/gridftp/globus5/authz/tests/mock/test_getVersion.cpp rename to tests/mock/test_getVersion.cpp index f407c97c1..9d734402b 100644 --- a/repository/gridftp/globus5/authz/tests/mock/test_getVersion.cpp +++ b/tests/mock/test_getVersion.cpp @@ -4,8 +4,6 @@ #include #include -#include "Version.hpp" - // Public includes #include "common/CommunicatorFactory.hpp" #include "common/CredentialFactory.hpp" @@ -21,18 +19,25 @@ // Standard includes #include +#include using namespace SDMS; -extern "C" { -#include "AuthzWorker.h" +std::string get_env_or_default(const char* env_name, const std::string& default_value) { + const char* val = std::getenv(env_name); + if (val && *val != '\0') { + return std::string(val); + } + return default_value; } BOOST_AUTO_TEST_SUITE(mock_liveness_get_version) BOOST_AUTO_TEST_CASE(mock_liveness_test_get_version) { - std::string fname = "../../mock_core/mock-datafed-core-key.pub"; + std::cout << "Running from: " << std::filesystem::current_path() << std::endl; + std::string default_pub_key_path = std::filesystem::current_path() / "mock-datafed-core-key.pub"; + std::string fname = get_env_or_default("DATAFED_MOCK_CORE_PUB_KEY", default_pub_key_path); std::string mock_core_server_address = "tcp://localhost:9998"; std::ifstream inf(fname.c_str()); if (!inf.is_open() || !inf.good()) { diff --git a/repository/gridftp/globus5/authz/mock_core/AuthMap.cpp b/tests/mock_core/AuthMap.cpp similarity 
index 100% rename from repository/gridftp/globus5/authz/mock_core/AuthMap.cpp rename to tests/mock_core/AuthMap.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/AuthMap.hpp b/tests/mock_core/AuthMap.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/AuthMap.hpp rename to tests/mock_core/AuthMap.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/AuthenticationManager.cpp b/tests/mock_core/AuthenticationManager.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/AuthenticationManager.cpp rename to tests/mock_core/AuthenticationManager.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/AuthenticationManager.hpp b/tests/mock_core/AuthenticationManager.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/AuthenticationManager.hpp rename to tests/mock_core/AuthenticationManager.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/CMakeLists.txt b/tests/mock_core/CMakeLists.txt similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/CMakeLists.txt rename to tests/mock_core/CMakeLists.txt diff --git a/repository/gridftp/globus5/authz/mock_core/ClientWorker.cpp b/tests/mock_core/ClientWorker.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/ClientWorker.cpp rename to tests/mock_core/ClientWorker.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/ClientWorker.hpp b/tests/mock_core/ClientWorker.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/ClientWorker.hpp rename to tests/mock_core/ClientWorker.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/Condition.cpp b/tests/mock_core/Condition.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/Condition.cpp rename to tests/mock_core/Condition.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/Condition.hpp b/tests/mock_core/Condition.hpp similarity index 100% rename from 
repository/gridftp/globus5/authz/mock_core/Condition.hpp rename to tests/mock_core/Condition.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/Config.cpp b/tests/mock_core/Config.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/Config.cpp rename to tests/mock_core/Config.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/Config.hpp b/tests/mock_core/Config.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/Config.hpp rename to tests/mock_core/Config.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/IMockCoreServer.hpp b/tests/mock_core/IMockCoreServer.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/IMockCoreServer.hpp rename to tests/mock_core/IMockCoreServer.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/MockCoreServer.cpp b/tests/mock_core/MockCoreServer.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/MockCoreServer.cpp rename to tests/mock_core/MockCoreServer.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/MockCoreServer.hpp b/tests/mock_core/MockCoreServer.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/MockCoreServer.hpp rename to tests/mock_core/MockCoreServer.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/MockGlobals.cpp b/tests/mock_core/MockGlobals.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/MockGlobals.cpp rename to tests/mock_core/MockGlobals.cpp diff --git a/repository/gridftp/globus5/authz/mock_core/MockGlobals.hpp b/tests/mock_core/MockGlobals.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/MockGlobals.hpp rename to tests/mock_core/MockGlobals.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/PublicKeyTypes.hpp b/tests/mock_core/PublicKeyTypes.hpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/PublicKeyTypes.hpp rename 
to tests/mock_core/PublicKeyTypes.hpp diff --git a/repository/gridftp/globus5/authz/mock_core/Version.hpp.in b/tests/mock_core/Version.hpp.in similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/Version.hpp.in rename to tests/mock_core/Version.hpp.in diff --git a/repository/gridftp/globus5/authz/mock_core/main.cpp b/tests/mock_core/main.cpp similarity index 100% rename from repository/gridftp/globus5/authz/mock_core/main.cpp rename to tests/mock_core/main.cpp diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 6ead222f7..d4373c707 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -592,12 +592,22 @@ the registration page. a_resp.redirect("/ui/register"); } else { + if (reply.user.length > 1) { + logger.warn( + "ui/authn", + getCurrentLineNumber(), + "More than one user was returned from DataFed, this can happen if a user has registered two or more separate accounts with DataFed and has since linked their identities from a third party identity manager. DataFed will select the first identity when logging in.", + ); + } + let username = reply.user[0]?.uid?.replace(/^u\//, ""); logger.info( "/ui/authn", getCurrentLineNumber(), "User: " + uid + - " verified, acc:" + + " verified, mapped to: " + + username + + " acc:" + xfr_token.access_token + ", ref: " + xfr_token.refresh_token + @@ -606,7 +616,7 @@ the registration page. ); // Store only data needed for active session - a_req.session.uid = uid; + a_req.session.uid = username; a_req.session.reg = true; let redirect_path = "/ui/main"; diff --git a/web/docker/Dockerfile b/web/docker/Dockerfile index 6af086e1e..5fafb6b08 100644 --- a/web/docker/Dockerfile +++ b/web/docker/Dockerfile @@ -1,21 +1,23 @@ # NOTE this image must be built with respect to the base of the project i.e. # cd ${PROJECT_ROOT} or cd DataFed # docker build -f web/docker/Dockerfile . 
-ARG BUILD_BASE="debian:bookworm-slim" -ARG DEPENDENCIES="dependencies" -ARG RUNTIME="runtime" -ARG DATAFED_DIR="/datafed" -ARG DATAFED_INSTALL_PATH="/opt/datafed" +ARG BUILD_BASE="debian:bookworm-slim" +ARG DEPENDENCIES="dependencies" +ARG RUNTIME="runtime" +ARG DATAFED_DIR="/datafed" +ARG DATAFED_INSTALL_PATH="/opt/datafed" ARG DATAFED_DEPENDENCIES_INSTALL_PATH="/opt/datafed/dependencies" -ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" -ARG BUILD_DIR="$DATAFED_DIR/source" -ARG LIB_DIR="/usr/local/lib" +ARG GCS_IMAGE="code.ornl.gov:4567/dlsw/datafed/gcs-ubuntu-focal" +ARG BUILD_DIR="$DATAFED_DIR/source" +ARG LIB_DIR="/usr/local/lib" +ARG DATAFED_DEPENDENCIES_ROOT="$BUILD_DIR/external/DataFedDependencies" FROM ${DEPENDENCIES} AS ws-build ARG DATAFED_DIR ARG BUILD_DIR ARG DATAFED_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT ARG DATAFED_DEPENDENCIES_INSTALL_PATH ENV DATAFED_INSTALL_PATH="${DATAFED_INSTALL_PATH}" @@ -24,6 +26,12 @@ EXPOSE 7513 # For communication with the public EXPOSE 443 +RUN mkdir -p ${DATAFED_DEPENDENCIES_ROOT}/scripts && \ + mv ./scripts/utils.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/utils.sh && \ + mv ./scripts/dependency_install_functions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/dependency_install_functions.sh && \ + mv ./scripts/dependency_versions.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/dependency_versions.sh && \ + mv ./scripts/generate_dependencies_config.sh ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh + COPY ./CMakeLists.txt ${BUILD_DIR} COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ COPY ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/ @@ -34,16 +42,18 @@ COPY ./cmake ${BUILD_DIR}/cmake COPY ./common/proto ${BUILD_DIR}/common/proto COPY ./web ${BUILD_DIR}/web -RUN ${BUILD_DIR}/scripts/generate_datafed.sh && \ - ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. 
-B build \ - -DBUILD_REPO_SERVER=False \ - -DBUILD_AUTHZ=False \ - -DBUILD_CORE_SERVER=False \ - -DBUILD_WEB_SERVER=True \ - -DBUILD_DOCS=False \ - -DBUILD_PYTHON_CLIENT=False \ - -DBUILD_FOXX=False \ - -DBUILD_COMMON=False +RUN ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh && \ + ${BUILD_DIR}/scripts/generate_datafed.sh && \ + ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake -S. -B build \ + -DBUILD_REPO_SERVER=False \ + -DBUILD_AUTHZ=False \ + -DBUILD_CORE_SERVER=False \ + -DBUILD_WEB_SERVER=True \ + -DBUILD_DOCS=False \ + -DBUILD_PYTHON_CLIENT=False \ + -DBUILD_FOXX=False \ + -DBUILD_COMMON=False \ + -DENABLE_INTEGRATION_TESTS=False RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build RUN ${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/cmake --build build --target install # Only part of the final install can be done here @@ -57,30 +67,33 @@ ARG DATAFED_DIR ARG DATAFED_INSTALL_PATH ARG BUILD_DIR ARG DATAFED_DEPENDENCIES_INSTALL_PATH +ARG DATAFED_DEPENDENCIES_ROOT # The above should also be available at runtime ENV DATAFED_INSTALL_PATH="$DATAFED_INSTALL_PATH" ENV DATAFED_DEPENDENCIES_INSTALL_PATH="$DATAFED_DEPENDENCIES_INSTALL_PATH" +ENV DATAFED_DEPENDENCIES_ROOT="$DATAFED_DEPENDENCIES_ROOT" ENV DATAFED_DIR="$DATAFED_DIR" ENV BUILD_DIR="$BUILD_DIR" ENV DATAFED_DEFAULT_LOG_PATH="$DATAFED_INSTALL_PATH/logs" -RUN apt install -y make g++ +RUN apt update && apt install -y make g++ WORKDIR ${DATAFED_DIR} USER datafed -COPY --chown=datafed:root ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/generate_datafed.sh -COPY --chown=datafed:root ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/dependency_versions.sh -COPY --chown=datafed:root ./scripts/dependency_install_functions.sh ${BUILD_DIR}/scripts/dependency_install_functions.sh -COPY --chown=datafed:root ./scripts/generate_ws_config.sh ${BUILD_DIR}/scripts/generate_ws_config.sh -COPY --chown=datafed:root ./scripts/install_ws.sh ${BUILD_DIR}/scripts/install_ws.sh -COPY 
--chown=datafed:root ./cmake/Version.cmake ${BUILD_DIR}/cmake/Version.cmake -COPY --chown=datafed:root ./scripts/utils.sh ${BUILD_DIR}/scripts/utils.sh -COPY --chown=datafed:root ./scripts/export_dependency_version.sh ${BUILD_DIR}/scripts/export_dependency_version.sh +COPY --from=ws-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts/ {DATAFED_DEPENDENCIES_ROOT}/scripts/ + +COPY --chown=datafed:root ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/generate_datafed.sh +COPY --chown=datafed:root ./scripts/export_dependency_version.sh ${BUILD_DIR}/scripts/export_dependency_version.sh +COPY --chown=datafed:root ./scripts/generate_ws_config.sh ${BUILD_DIR}/scripts/generate_ws_config.sh +COPY --chown=datafed:root ./scripts/install_ws.sh ${BUILD_DIR}/scripts/install_ws.sh +COPY --chown=datafed:root ./cmake/Version.cmake ${BUILD_DIR}/cmake/Version.cmake + COPY --from=ws-build --chown=datafed:root ${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm ${DATAFED_DEPENDENCIES_INSTALL_PATH}/nvm COPY --from=ws-build --chown=datafed:root ${BUILD_DIR}/web ${BUILD_DIR}/web +COPY --from=ws-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts ${DATAFED_DEPENDENCIES_ROOT}/scripts COPY --from=ws-build --chown=datafed:root ${DATAFED_INSTALL_PATH}/web ${DATAFED_INSTALL_PATH}/web COPY --from=ws-build --chown=datafed:root /usr/bin/curl /usr/bin/curl From dfb241abcbbb1f37c8a3898003b270291d0075eb Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Tue, 25 Nov 2025 06:54:59 -0500 Subject: [PATCH 02/65] [DAPS-1777] - Foxx, fix: user_router fix regression in missing response. (#1778) [DAPS-1786] - Web tests, refactor: add test for hitting password reset. 
(#1787) --- core/database/foxx/api/user_router.js | 1 + tests/end-to-end/README.md | 73 +++++++++++++++++ tests/end-to-end/web-UI/CMakeLists.txt | 2 +- tests/end-to-end/web-UI/auth.setup.js.in | 79 ++++++++++++------ tests/end-to-end/web-UI/playwright.config.js | 14 ---- .../scripts/testingBasicFunction.spec.js | 80 +++++++++---------- .../scripts/testingUserPasswordChange.spec.js | 73 +++++++++++++++++ 7 files changed, 240 insertions(+), 82 deletions(-) create mode 100644 tests/end-to-end/web-UI/scripts/testingUserPasswordChange.spec.js diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 95cd4d368..5e233d1f5 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -393,6 +393,7 @@ router result = [user.new]; }, }); + res.send(result); logger.logRequestSuccess({ client: client?._id, correlationId: req.headers["x-correlation-id"], diff --git a/tests/end-to-end/README.md b/tests/end-to-end/README.md index fbcea5878..939f2bda0 100644 --- a/tests/end-to-end/README.md +++ b/tests/end-to-end/README.md @@ -14,3 +14,76 @@ To use the python API you will need to build it cmake -S. -B build -DBUILD_PYTHON_CLIENT=ON cmake --build build --target pydatafed ``` + +## Playwright + +On windows, it is recommended to run playwright directly on windows and not in +a docker container or on wsl2. If you do take that approach you will likely +encounter compatibility problems, and will still need to stand up an XServer +on the windows host. + +To run + +```bash +npm install . +npx playwright install +npx playwright test +``` + +You can also use the playwright code generator to add additional tests. + +```bash +npx playwright codegen +``` + +If you are running on linux you might be able to get away with running in a +docker image. + +Below is a minimal dockerfile to build playwright with a few useful developer tools. 
+ +```Dockerfile +FROM mcr.microsoft.com/playwright:v1.45.1-noble + +# Install Chromium only +WORKDIR /work +RUN npx playwright install chromium --with-deps; npx playwright install +RUN apt-get update && apt-get install -y ca-certificates bash vim && update-ca-certificates +``` + +Build it with. + +```bash +docker build . -t playwright:latest +``` + +```bash +docker run --rm -v "$PWD:/work" -w /work -e DATAFED_WEB_TEST_USERNAME="$DATAFED_WEB_TEST_USERNAME" -e DATAFED_WEB_TEST_PASSWORD="$DATAFED_WEB_TEST_PASSWORD" -e DATAFED_DOMAIN="$DATAFED_DOMAIN" -e DISPLAY=host.docker. +internal:0 playwright:latest npx -y playwright test +``` + +NOTE: By default the web tests are setup to run in headless mode but if you +wish to see the web tests as they execute while debugging etc you will need +to edit the configuration in playwright.config.js + +This might need to be specified in the following places +``` + projects: [ + { + name: "chromium", + use: { + ...devices["Desktop Chrome"], + headless: false, // optional: run headed + }, + }, + ] +``` + +``` + use: { + /* Collect trace when retrying the failed test. 
See https://playwright.dev/docs/trace-viewer */ + trace: "on-first-retry", + headless: false, + screenshot: "only-on-failure", + +``` + diff --git a/tests/end-to-end/web-UI/CMakeLists.txt b/tests/end-to-end/web-UI/CMakeLists.txt index 4e21e2fc6..cf70c32f5 100644 --- a/tests/end-to-end/web-UI/CMakeLists.txt +++ b/tests/end-to-end/web-UI/CMakeLists.txt @@ -8,7 +8,7 @@ configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/auth.setup.js" @ONLY ) - +message("ENABLE END TO END!!!!!!!") #FIXTHIS # For E2E web ui test if(ENABLE_END_TO_END_WEB_TESTS) diff --git a/tests/end-to-end/web-UI/auth.setup.js.in b/tests/end-to-end/web-UI/auth.setup.js.in index 9d898dacc..eea652183 100644 --- a/tests/end-to-end/web-UI/auth.setup.js.in +++ b/tests/end-to-end/web-UI/auth.setup.js.in @@ -1,69 +1,102 @@ -const { chromium } = require('playwright'); -const path = require('path'); -const process = require('process'); +const { chromium } = require("playwright"); +const path = require("path"); +const process = require("process"); console.log("INFO - ******Inside Setup file******"); module.exports = async function () { // if a playwright page object doesn't exist, create one const browser = await chromium.launch({ - args: ['--ignore-certificate-errors'], + args: ["--ignore-certificate-errors"], headless: true, }); - const page = await browser.newPage(); + const page = await browser.newPage(); console.log("INFO - new page object created"); - + // Extra safety for debugging slow loads - page.on('console', msg => console.log('INFO - [PAGE LOG] ', msg.text())); - page.on('response', res => { - if (res.status() >= 400) - console.log(`ERROR - [HTTP ${res.status()}] ${res.url()}`); + page.on("console", (msg) => console.log("INFO - [PAGE LOG] ", msg.text())); + page.on("response", (res) => { + if (res.status() >= 400) console.log(`ERROR - [HTTP ${res.status()}] ${res.url()}`); }); // --- Step 1: Go to DataFed --- console.log("INFO - 1. 
Got to DataFed"); - await page.goto('https://@DATAFED_DOMAIN@/ui/welcome', { waitUntil: 'networkidle', timeout: 60000 }); + await page.goto("https://@DATAFED_DOMAIN@/ui/welcome", { + waitUntil: "networkidle", + timeout: 60000, + }); // --- Step 2: Click Login/Register --- console.log("INFO - 2. Login and Register"); - const loginButton = page.getByRole('button', { name: 'Log In / Register' }); - await loginButton.waitFor({ state: 'visible', timeout: 30000 }); + const loginButton = page.getByRole("button", { name: "Log In / Register" }); + await loginButton.waitFor({ state: "visible", timeout: 30000 }); await loginButton.click(); // --- Step 3: Check if link is visible --- console.log("INFO - 3. Check if link is visible"); - const globusLink = page.getByRole('link', { name: /Globus/i }); - await globusLink.waitFor({ state: 'visible', timeout: 30000 }); + const globusLink = page.getByRole("link", { name: /Globus/i }); + await globusLink.waitFor({ state: "visible", timeout: 30000 }); + await page.waitForTimeout(1000); // --- Step 4: Click Globus ID to sign in --- console.log("INFO - 4. Use Globus ID to sign in"); - const globusIDButton = page.getByRole('button', { name: 'Globus ID to sign in' }); - await globusIDButton.waitFor({ state: 'visible', timeout: 30000 }); + const globusIDButton = page.getByRole("button", { name: "Globus ID to sign in" }); + await globusIDButton.waitFor({ state: "visible", timeout: 30000 }); await globusIDButton.click(); + await page.waitForTimeout(1000); // --- Step 5: Wait for Globus redirect --- console.log("INFO - 5. Wait for Globus login to redirect"); - await page.waitForLoadState('networkidle', { timeout: 45000 }); + await page.waitForLoadState("networkidle", { timeout: 45000 }); // --- Step 6: Fill in credentials robustly --- console.log("INFO - 6. 
Fill in credentials"); const usernameField = page.getByLabel(/username/i); - await usernameField.waitFor({ state: 'visible', timeout: 45000 }); + await usernameField.waitFor({ state: "visible", timeout: 45000 }); await usernameField.fill(process.env.DATAFED_WEB_TEST_USERNAME); const passwordField = page.getByLabel(/password/i); - await passwordField.waitFor({ state: 'visible', timeout: 45000 }); + await passwordField.waitFor({ state: "visible", timeout: 45000 }); await passwordField.fill(process.env.DATAFED_WEB_TEST_PASSWORD); // --- Step 7: Submit form --- await page.click('button[type="submit"]'); - await page.waitForURL('https://@DATAFED_DOMAIN@/ui/main') + await Promise.race([ + page.waitForURL(/\/ui\/main$/, { timeout: 45000 }), + page.waitForURL(/\/ui\/register$/, { timeout: 45000 }), + ]); + + let current = page.url(); + console.log("INFO - 7a Redirected to:", current); + + if (current.includes("/ui/register")) { + console.log("INFO - 7b. Registering:", current); + await page.getByRole('button', { name: 'Continue Registration' }).click(); + } else { + console.log("INFO - 7b. Grabbing fresh url:", current); + if (!page.url().includes("/ui/main")) { + await page.waitForURL("https://@DATAFED_DOMAIN@/ui/main", { timeout: 45000 }); + } + current = page.url(); + console.log("INFO - 7c. 
URL is: ", current); + } + + await page.screenshot({ path: "after_login.png", fullPage: true }); + if (current.includes("/ui/main")) { + console.log("INFO - 8b Successful: ", current); + } else { + throw new Error(`Unexpected redirect URL: ${current}`); + } + //await Promise.all([ + // page.waitForNavigation({ url: /\/ui\/main/ }), // robust matching + // page.click('button[type="submit"]'), + //]); + //page.screenshot(path="final.png", full_page=True) console.log("INFO - ******PAST LOGIN******"); - await page.context().storageState({ path: './.auth/auth.json'}); //TESTING + await page.context().storageState({ path: "./.auth/auth.json" }); //TESTING console.log("INFO - ******Done with login******"); await browser.close(); }; - diff --git a/tests/end-to-end/web-UI/playwright.config.js b/tests/end-to-end/web-UI/playwright.config.js index cae11dd50..630b3bcba 100644 --- a/tests/end-to-end/web-UI/playwright.config.js +++ b/tests/end-to-end/web-UI/playwright.config.js @@ -50,19 +50,5 @@ module.exports = defineConfig({ ...devices["Desktop Chrome"], }, }, - - // { - // name: 'firefox', - // use: { - // ...devices['Desktop Firefox'], - // }, - // }, - - // { - // name: 'webkit', - // use: { - // ...devices['Desktop Safari'], - // }, - // }, ], }); diff --git a/tests/end-to-end/web-UI/scripts/testingBasicFunction.spec.js b/tests/end-to-end/web-UI/scripts/testingBasicFunction.spec.js index c45c37898..ff516510c 100644 --- a/tests/end-to-end/web-UI/scripts/testingBasicFunction.spec.js +++ b/tests/end-to-end/web-UI/scripts/testingBasicFunction.spec.js @@ -1,54 +1,46 @@ import { test, expect } from "@playwright/test"; // checking visibility and expanding some dropdowns -test("test visibility", async ({ page }) => { - try { - console.log("******Begin test******"); - // Temporary fix - let domain = process.env.DATAFED_DOMAIN; - await page.goto("https://" + domain + "/"); - if (await page.getByRole("button", { name: "Log In / Register" }).isVisible()) { - console.log("NOT LOGGED 
IN"); +test.describe("DataFed UI Navigation", () => { + test("should display main navigation elements", async ({ page }) => { + const domain = process.env.DATAFED_DOMAIN; + if (!domain) { + throw new Error("DATAFED_DOMAIN environment variable not set"); } - if (await expect(page.getByText("Continue Registration")).toBeVisible()) { - await page.getByText("Continue Registration").click({ timeout: 20000 }); - } - await expect(page.locator(".ui-icon").first()).toBeVisible({ - timeout: 20000, - }); + + await page.goto(`https://${domain}/ui/main`); + + // Verify main elements + await expect(page.locator(".ui-icon").first()).toBeVisible(); await expect(page.getByText("DataFed - Scientific Data")).toBeVisible(); await expect(page.getByRole("link", { name: "My Data" })).toBeVisible(); await expect(page.getByRole("link", { name: "Catalog" })).toBeVisible(); - await expect(page.getByRole("button", { name: "" })).toBeVisible(); + }); + + test("should expand tree navigation items", async ({ page }) => { + const domain = process.env.DATAFED_DOMAIN; + if (!domain) { + throw new Error("DATAFED_DOMAIN environment variable not set"); + } + + await page.goto(`https://${domain}/ui/main`); + + // Define tree items to expand + const treeItems = [ + "Public Collections", + "Allocations", + "Project Data", + "Shared Data", + "Saved Queries", + "By User", + ]; - await page - .getByRole("treeitem", { name: "  Public Collections" }) - .getByRole("button") - .click(); - await page - .getByRole("treeitem", { name: "  Public Collections" }) - .getByRole("group") - .click(); - await page.getByRole("treeitem", { name: "  Allocations" }).getByRole("button").click(); - await page.getByRole("treeitem", { name: "  Project Data" }).getByRole("button").click(); - await page.getByRole("treeitem", { name: "  Shared Data" }).getByRole("button").click(); - await page - .getByRole("treeitem", { name: "  Saved Queries" }) - .locator("span") - .first() - .click(); - await 
page.getByRole("treeitem", { name: "  Saved Queries" }).getByRole("button").click(); - await page.getByText("Provenance Annotate Upload").click({ timeout: 20000 }); - await page.getByRole("treeitem", { name: "  By User" }).getByRole("button").click(); - } catch (error) { - // element not visible, either the test broke due to tags changing, or not logged in - // try to log out, because if not logged out, future tests will fail due to globus being annoying - if (await page.getByRole("button", { name: "" }).isVisible()) { - await page.getByRole("button", { name: "" }).click(); - } else { - // if in here, check if you logged out properly - throw error; + for (const item of treeItems) { + const treeItem = page.getByRole("treeitem", { name: new RegExp(item) }); + const button = treeItem.getByRole("button").first(); + await expect(button).toBeVisible(); + await button.click(); + // Add assertion that it expanded if needed } - } - //removed logout + }); }); diff --git a/tests/end-to-end/web-UI/scripts/testingUserPasswordChange.spec.js b/tests/end-to-end/web-UI/scripts/testingUserPasswordChange.spec.js new file mode 100644 index 000000000..dbbc8f48d --- /dev/null +++ b/tests/end-to-end/web-UI/scripts/testingUserPasswordChange.spec.js @@ -0,0 +1,73 @@ +import { test, expect } from "@playwright/test"; + +// checking visibility and expanding some dropdowns +test.describe("DataFed UI password change", () => { + // Since auth is handled by global setup, we start from authenticated state + test.beforeEach(async ({ page }) => { + const domain = process.env.DATAFED_DOMAIN || "domain.com"; + + // Navigate to main page (already authenticated via storageState) + await page.goto(`https://${domain}/ui/main`, { + waitUntil: "networkidle", + }); + + // Verify we're on the main page and authenticated + await expect(page).toHaveURL(new RegExp(`https://${domain}/ui/main`)); + + // Wait for main UI to be ready + await page + .waitForSelector('[data-testid="main-content"], .main-content, 
#main', { + state: "visible", + timeout: 3000, + }) + .catch(() => { + // Fallback: just wait for any main element + return page.waitForTimeout(2000); + }); + + // Log current page state for debugging + console.log(`INFO - Test starting on: ${page.url()}`); + }); + + test("should display main page after entering password", async ({ page }) => { + await test.step("Open settings menu", async () => { + const button = page.locator("#btn_settings"); + + if (await button.isVisible()) { + console.log("INFO - Button is visible (btn_settings)"); + await button.click(); + } else { + console.log("INFO - Button is not visible (btn_settings)"); + } + }); + + await test.step("Enter password and confirmation password", async () => { + // Define locators for the elements + const newPasswordInput = page.locator("#cli_new_pw"); + const confirmPasswordInput = page.locator("#cli_confirm_pw"); + const revokeButton = page.locator("#btn_revoke_cred"); + const saveButton = page.getByRole("button", { name: "Save" }); + + // Wait for all elements to be visible + await Promise.all([ + newPasswordInput.waitFor({ state: "visible", timeout: 5000 }), + confirmPasswordInput.waitFor({ state: "visible", timeout: 5000 }), + revokeButton.waitFor({ state: "visible", timeout: 5000 }), + saveButton.waitFor({ state: "visible", timeout: 5000 }), + ]); + await newPasswordInput.click(); + await newPasswordInput.fill("Terrible2s!!!"); + await confirmPasswordInput.click(); + await confirmPasswordInput.fill("Terrible2s!!!"); + await saveButton.click(); + // Make sure an error does not appear. 
+ // Unfortunately it take a while for the error to show up + await page.waitForTimeout(15000); + + await page.screenshot({ path: "change-password-has-error.png", fullPage: true }); + + // These WILL fail if error appears + await expect(page.getByText("Save Settings Error")).not.toBeVisible(); + }); + }); +}); From fb1a80008bfb77fae7e83c6e36a3658c4b76ed7d Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Mon, 1 Dec 2025 07:14:59 -0500 Subject: [PATCH 03/65] [DAPS-1774] - Core, Python, Database, Foxx, Test add query end to end tests (#1779) * [DAPS-1775] - fix: core, foxx, add missing {}, foxx query_router add params object schema to routes. (#1781) * [DAPS-1777] - fix: foxx, user_router fix regression in missing response. (#1778) * [DAPS-1786] - refactor: web tests, add test for hitting password reset. (#1787) * [DAPS-1277] - fix: mock, core, common, PROXY_BASIC_ZMQ and PROXY_CUSTOM correctly defined * [DAPS-1790] - fix: common, core, repo, zmq assertion failure during EXCEPT call due to callin zmq_msg with invalid state after closing it. * [DAPS-1791] - fix: build, python client, requirements.txt was being moved to a folder named requirements.txt during cmake configure script. 
--- CMakeLists.txt | 2 +- common/source/ServerFactory.cpp | 3 +- .../communicators/ZeroMQCommunicator.cpp | 3 +- core/database/foxx/api/query_router.js | 16 +- core/server/CoreServer.cpp | 4 +- core/server/DatabaseAPI.cpp | 4 +- tests/end-to-end/CMakeLists.txt | 3 + tests/end-to-end/test_api_query.py | 253 ++++++++++++++++++ tests/mock_core/MockCoreServer.cpp | 2 +- 9 files changed, 271 insertions(+), 19 deletions(-) create mode 100755 tests/end-to-end/test_api_query.py diff --git a/CMakeLists.txt b/CMakeLists.txt index ac472ce8e..e04ea9cb6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -200,7 +200,7 @@ endif() if( BUILD_PYTHON_CLIENT ) # make target = pydatafed - file(COPY ${PROJECT_SOURCE_DIR}/external/DataFedDependencies/python/datafed_pkg/requirements.txt DESTINATION ${PROJECT_SOURCE_DIR}/python/datafed_pkg/requirements.txt) + file(COPY ${PROJECT_SOURCE_DIR}/external/DataFedDependencies/python/datafed_pkg/requirements.txt DESTINATION ${PROJECT_SOURCE_DIR}/python/datafed_pkg/) add_subdirectory( python EXCLUDE_FROM_ALL ) endif() diff --git a/common/source/ServerFactory.cpp b/common/source/ServerFactory.cpp index b76764987..6a810e036 100644 --- a/common/source/ServerFactory.cpp +++ b/common/source/ServerFactory.cpp @@ -1,6 +1,7 @@ // Local private includes #include "servers/Proxy.hpp" +#include "servers/ProxyBasicZMQ.hpp" // Local public includes #include "common/IServer.hpp" @@ -39,7 +40,7 @@ std::unique_ptr ServerFactory::create( "been provided that will never be used!"); } return std::unique_ptr( - new Proxy(socket_options, socket_credentials, m_log_context)); + new ProxyBasicZMQ(socket_options, socket_credentials, m_log_context)); } EXCEPT_PARAM(1, "Error Server type unsupported"); diff --git a/common/source/communicators/ZeroMQCommunicator.cpp b/common/source/communicators/ZeroMQCommunicator.cpp index c2d402fe4..8ada24922 100644 --- a/common/source/communicators/ZeroMQCommunicator.cpp +++ b/common/source/communicators/ZeroMQCommunicator.cpp @@ -378,10 
+378,11 @@ void receiveBody(IMessage &msg, Buffer &buffer, ProtoBufFactory &factory, if (frame_size > 0) { if (zmq_msg_size(&zmq_msg) != frame_size) { + size_t msg_size = zmq_msg_size(&zmq_msg); zmq_msg_close(&zmq_msg); EXCEPT_PARAM(1, "RCV Invalid message body received. Expected: " << frame_size - << ", got: " << zmq_msg_size(&zmq_msg)); + << ", got: " << msg_size); } copyToBuffer(buffer, zmq_msg_data(&zmq_msg), frame_size); diff --git a/core/database/foxx/api/query_router.js b/core/database/foxx/api/query_router.js index 30de93454..88e3accb4 100644 --- a/core/database/foxx/api/query_router.js +++ b/core/database/foxx/api/query_router.js @@ -66,8 +66,6 @@ router g_lib.procInputParam(req.body, "title", false, obj); - //console.log("qry/create filter:",obj.qry_filter); - var qry = g_db.q.save(obj, { returnNew: true, }).new; @@ -124,7 +122,7 @@ router qry_begin: joi.string().required(), qry_end: joi.string().required(), qry_filter: joi.string().allow("").required(), - params: joi.any().required(), + params: joi.object().required(), limit: joi.number().integer().required(), query: joi.any().required(), }) @@ -169,6 +167,7 @@ router qry.qry_begin = req.body.qry_begin; qry.qry_end = req.body.qry_end; qry.qry_filter = req.body.qry_filter; + qry.params = req.body.params; qry.limit = req.body.limit; qry.query = req.body.query; @@ -178,7 +177,6 @@ router qry.params.cols = null; }*/ - //console.log("qry/upd filter:",obj.qry_filter); qry = g_db._update(qry._id, qry, { mergeObjects: false, returnNew: true, @@ -231,7 +229,7 @@ router qry_begin: joi.string().required(), qry_end: joi.string().required(), qry_filter: joi.string().allow("").required(), - params: joi.any().required(), + params: joi.object().required(), limit: joi.number().integer().required(), query: joi.any().required(), }) @@ -597,10 +595,6 @@ function execQuery(client, mode, published, orig_query) { qry += query.qry_end; - //console.log( "execqry" ); - //console.log( "qry", qry ); - //console.log( "params", 
query.params ); - // Enforce query paging limits if (query.params.cnt > g_lib.MAX_PAGE_SIZE) { query.params.cnt = g_lib.MAX_PAGE_SIZE; @@ -725,7 +719,7 @@ router const query = { ...req.body, - params: JSON.parse(req.body.params), + params: req.body.params, }; results = execQuery(client, req.body.mode, req.body.published, query); @@ -762,7 +756,7 @@ router qry_begin: joi.string().required(), qry_end: joi.string().required(), qry_filter: joi.string().optional().allow(""), - params: joi.string().required(), + params: joi.object().required(), limit: joi.number().integer().required(), }) .required(), diff --git a/core/server/CoreServer.cpp b/core/server/CoreServer.cpp index fab7f9f0b..f4caec0f6 100644 --- a/core/server/CoreServer.cpp +++ b/core/server/CoreServer.cpp @@ -241,7 +241,7 @@ void Server::msgRouter(LogContext log_context, int thread_count) { } ServerFactory server_factory(log_context); - auto proxy = server_factory.create(ServerType::PROXY_BASIC_ZMQ, + auto proxy = server_factory.create(ServerType::PROXY_CUSTOM, socket_options, socket_credentials); // Ceate worker threads @@ -439,7 +439,7 @@ void Server::ioInsecure(LogContext log_context, int thread_count) { } ServerFactory server_factory(log_context); - auto proxy = server_factory.create(ServerType::PROXY_BASIC_ZMQ, + auto proxy = server_factory.create(ServerType::PROXY_CUSTOM, socket_options, socket_credentials); proxy->run(); diff --git a/core/server/DatabaseAPI.cpp b/core/server/DatabaseAPI.cpp index aa0d35905..43ce988a3 100644 --- a/core/server/DatabaseAPI.cpp +++ b/core/server/DatabaseAPI.cpp @@ -1308,7 +1308,7 @@ void DatabaseAPI::generalSearch(const Auth::SearchRequest &a_request, payload["qry_begin"] = qry_begin; payload["qry_end"] = qry_end; payload["qry_filter"] = qry_filter; - payload["params"] = "{" + params + "}"; + payload["params"] = params; payload["limit"] = to_string(cnt); string body = payload.dump(-1, ' ', true); @@ -3942,7 +3942,7 @@ uint32_t DatabaseAPI::parseSearchRequest(const 
Auth::SearchRequest &a_request, a_qry_begin = a_qry_begin; a_qry_end = a_qry_end; a_qry_filter = a_qry_filter; - + a_params = "{" + a_params + "}"; return cnt; } diff --git a/tests/end-to-end/CMakeLists.txt b/tests/end-to-end/CMakeLists.txt index 37de8b8b7..8a107fdbd 100644 --- a/tests/end-to-end/CMakeLists.txt +++ b/tests/end-to-end/CMakeLists.txt @@ -10,6 +10,7 @@ if( ENABLE_END_TO_END_API_TESTS ) add_test(NAME end_to_end_alloc COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_api_alloc.py") add_test(NAME end_to_end_collection COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_api_collection.py") add_test(NAME end_to_end_record COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_api_record.py") + add_test(NAME end_to_end_query COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/test_api_query.py") # Note because these tests are all using the same database we cannot run most of them concurrently # They must be run sequentially so that concurrent API calls do not create problems @@ -26,5 +27,7 @@ if( ENABLE_END_TO_END_API_TESTS ) set_tests_properties(end_to_end_alloc PROPERTIES FIXTURES_SETUP FIX_ALLOC) set_tests_properties(end_to_end_collection PROPERTIES FIXTURES_REQUIRED FIX_ALLOC) set_tests_properties(end_to_end_record PROPERTIES FIXTURES_REQUIRED FIX_ALLOC) + set_tests_properties(end_to_end_record PROPERTIES FIXTURES_SETUP FIX_RECORD) + set_tests_properties(end_to_end_query PROPERTIES FIXTURES_REQUIRED FIX_RECORD) endif() diff --git a/tests/end-to-end/test_api_query.py b/tests/end-to-end/test_api_query.py new file mode 100755 index 000000000..4d740593e --- /dev/null +++ b/tests/end-to-end/test_api_query.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +# WARNING - to work with python environments we cannot use /bin/python3 or +# a hardcoded abs path. +import json +import os +import sys +import time +import unittest + +# Depends on the success of the following tests +# 1. Login +# 2. Repo +# 3. Allocation +# 4. 
Collection + + +class TestDataFedPythonAPIQueryCRUD(unittest.TestCase): + def setUp(self): + path_of_file = os.path.abspath(__file__) + current_folder = os.path.dirname(path_of_file) + path_to_python_datafed_module = os.path.normpath( + current_folder + + os.sep + + ".." + + os.sep + + ".." + + os.sep + + "python/datafed_pkg" + ) + sys.path.insert(0, path_to_python_datafed_module) + try: + from datafed.CommandLib import API + except ImportError: + print( + "datafed was not found, make sure you are running script with " + "PYTHONPATH set to the location of the package in the datafed repo" + ) + sys.exit(1) + + from datafed import version as df_ver + + print(df_ver) + + datafed_domain = os.environ.get("DATAFED_DOMAIN") + opts = {"server_host": datafed_domain} + + if datafed_domain is None: + print("DATAFED_DOMAIN must be set before the end-to-end tests can be run") + sys.exit(1) + + self._df_api = API(opts) + + self._username = "datafed89" + password = os.environ.get("DATAFED_USER89_PASSWORD") + + self._timeout = int(os.environ.get('DATAFED_TEST_TIMEOUT_OVERRIDE', '1')) + count = 0 + while True: + try: + self._df_api.loginByPassword(self._username, password) + break + except BaseException: + pass + count += 1 + # Try three times to authenticate + assert count < 3 + + path_to_repo_form = os.environ.get("DATAFED_REPO_FORM_PATH") + if path_to_repo_form is None: + self.fail("DATAFED_REPO_FORM_PATH env variable is not defined") + + if not path_to_repo_form.endswith(".json"): + self.fail( + "repo create test requires that the repo form exist and be " + "provided as a json file, the test uses the environment " + "variable DATAFED_REPO_PATH to search for the repo form" + ) + + self._repo_form = {} + with open(path_to_repo_form) as json_file: + self._repo_form = json.load(json_file) + + if len(self._repo_form["exp_path"]) == 0: + print( + "exp_path is empty, we will set it to / for the test. 
This is " + "cruft and should be removed anyway" + ) + self._repo_form["exp_path"] = "/" + + self._repo_form["admins"] = ["u/" + self._username] + + # Create the repositories + result = self._df_api.repoCreate( + repo_id=self._repo_form["id"], + title=self._repo_form["title"], + desc=self._repo_form["desc"], + domain=self._repo_form["domain"], + capacity=self._repo_form["capacity"], + pub_key=self._repo_form["pub_key"], + address=self._repo_form["address"], + endpoint=self._repo_form["endpoint"], + path=self._repo_form["path"], + exp_path=self._repo_form["exp_path"], + admins=self._repo_form["admins"], + ) + + result = self._df_api.repoList(list_all=True) + count = 0 + while len(result[0].repo) == 0: + time.sleep(self._timeout) + result = self._df_api.repoList(list_all=True) + count = count + 1 + if count > 3: + self.fail("Setup failed with repo create") + + self._repo_id = self._repo_form["id"] + if not self._repo_id.startswith("repo/"): + self._repo_id = "repo/" + self._repo_id + + # Will return a task + result = self._df_api.repoAllocationCreate( + repo_id=self._repo_id, + subject=self._username, + data_limit=1000000000, + rec_limit=100, + ) + + task_id = result[0].task[0].id + + # Check the status of the task + task_result = self._df_api.taskView(task_id) + + # If status is less than 3 it is in the works + status = task_result[0].task[0].status + count = 0 + while status < 3: + if count > 2: + print(task_result) + self.fail( + "Something went wrong task was unable to complete, attempt " + "to create an allocation after 3 seconds failed, make sure " + "all services are running." 
+ ) + break + time.sleep(self._timeout) + task_result = self._df_api.taskView(task_id) + status = task_result[0].task[0].status + count = count + 1 + + parameters = { + "testing_tempareture": 900000, + "voltage": [1, 2, -4, 7.123], + "creator": "Lex Luther", + "occupation": "super villan", + "equipment_serial_numbers": {"SEM": 14, "AFM": 9134} + } + + title = "Adamantium" + alias = "adamantium" + record = self._df_api.dataCreate( + title=title, + alias=alias, + metadata=json.dumps(parameters), + tags=["material"], + parent_id="root", + ) + + print("Created record") + print(record) + + def test_query_create_delete(self): + + search_query = self._df_api.queryCreate(title="Search for Adamantium", owner="u/" + self._username, coll=["root"], meta="md.creator == 'Lex Luther'") + + print("Search query create") + print(search_query) + + query_result = self._df_api.queryExec(search_query[0].id) + print("Query result") + print(query_result) + + material = "" + for model in query_result[0].item: + if model.alias.startswith("adamantium"): + material = model.alias + + print(f"Query found {material}") + + self.assertEqual(material, "adamantium") + +# self._df_api.queryDelete(search_query[0].id) + +# def tearDown(self): +# +# task_result = self._df_api.dataDelete("adamantium") +# +# status = task_result[0].task[0].status +# count = 0 +# while status < 3: +# if count > 20: +# break +# time.sleep(self._timeout) +# task_result = self._df_api.taskView(task_result[0].task[0].id) +# status = task_result[0].task[0].status +# count = count + 1 +# +# print("Delete record result.") +# print(task_result) +# +# result = self._df_api.repoAllocationDelete( +# repo_id=self._repo_id, subject=self._username +# ) +# +# task_id = result[0].task[0].id +# +# # Check the status of the task +# task_result = self._df_api.taskView(task_id) +# +# # If status is less than 3 it is in the works +# status = task_result[0].task[0].status +# count = 0 +# while status < 3: +# if count > 2: +# print(task_result) 
+# self.fail( +# "Something went wrong task was unable to complete, attempt" +# " to delete an allocation after 3 seconds failed, make sure" +# " all services are running." +# ) +# break +# time.sleep(self._timeout) +# task_result = self._df_api.taskView(task_id) +# status = task_result[0].task[0].status +# count = count + 1 +# +# print("Delete Allocations") +# print(result) +# +# repo_id = self._repo_form["id"] +# if not repo_id.startswith("repo/"): +# repo_id = "repo/" + repo_id +# result = self._df_api.repoDelete(repo_id) +# result = self._df_api.repoList(list_all=True) + + +if __name__ == "__main__": + suite = unittest.TestSuite() + # Add them in the order they should be executed + suite.addTest(TestDataFedPythonAPIQueryCRUD("test_query_create_delete")) + runner = unittest.TextTestRunner() + result = runner.run(suite) + # wasSuccessful() return True which is not 0 + sys.exit(not result.wasSuccessful()) diff --git a/tests/mock_core/MockCoreServer.cpp b/tests/mock_core/MockCoreServer.cpp index 3195e1676..3025faaef 100644 --- a/tests/mock_core/MockCoreServer.cpp +++ b/tests/mock_core/MockCoreServer.cpp @@ -196,7 +196,7 @@ void Server::msgRouter(LogContext log_context, int thread_count) { } ServerFactory server_factory(log_context); - auto proxy = server_factory.create(ServerType::PROXY_BASIC_ZMQ, + auto proxy = server_factory.create(ServerType::PROXY_CUSTOM, socket_options, socket_credentials); // Ceate worker threads From a0145e31841793d6227185fcec9bb5c00d20d52b Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Mon, 1 Dec 2025 12:15:29 +0000 Subject: [PATCH 04/65] chore: Auto-format JavaScript files with Prettier --- docs/_static/js/html5shiv-printshiv.min.js | 3 -- web/static/ace/worker-coffee.js | 42 +++++++--------------- 2 files changed, 12 insertions(+), 33 deletions(-) diff --git a/docs/_static/js/html5shiv-printshiv.min.js b/docs/_static/js/html5shiv-printshiv.min.js index 72406b792..ae10bdff2 100644 --- a/docs/_static/js/html5shiv-printshiv.min.js +++ 
b/docs/_static/js/html5shiv-printshiv.min.js @@ -88,7 +88,6 @@ f = RegExp("^(?:" + d().join("|") + ")$", "i"), g = []; e--; - ) ((b = c[e]), f.test(b.nodeName) && g.push(b.applyElement(l(b)))); return g; @@ -100,7 +99,6 @@ d = c.length, e = a.ownerDocument.createElement(A + ":" + a.nodeName); d--; - ) ((b = c[d]), b.specified && e.setAttribute(b.nodeName, b.nodeValue)); return ((e.style.cssText = a.style.cssText), e); @@ -113,7 +111,6 @@ f = RegExp("(^|[\\s,>+~])(" + d().join("|") + ")(?=[[\\s,>+~#.:]|$)", "gi"), g = "$1" + A + "\\:$2"; e--; - ) ((b = c[e] = c[e].split("}")), (b[b.length - 1] = b[b.length - 1].replace(f, g)), diff --git a/web/static/ace/worker-coffee.js b/web/static/ace/worker-coffee.js index 2692b5bca..7400d29a4 100644 --- a/web/static/ace/worker-coffee.js +++ b/web/static/ace/worker-coffee.js @@ -1646,7 +1646,6 @@ for ( i = this.tokens, n = 0; (r = i[n]); - ) n += t.call(this, r, n, i); return !0; @@ -1668,7 +1667,6 @@ for ( h = this.tokens, o = 0; (c = h[n]); - ) { if ( 0 === o && @@ -1914,7 +1912,6 @@ .generated) && ((c = this.tag(n)), 0 > t.call(v, c)))); - ) (((s = this.tag(n)), 0 <= t.call(a, s)) && @@ -2287,7 +2284,6 @@ (C() && ":" !== H); - ) T() ? g() @@ -2746,7 +2742,6 @@ ? 1 : 0; C(); - ) y(o + P); return w(1); @@ -2813,7 +2808,6 @@ u !== i.length && ((l = i[u][0]), 0 <= t.call(o, l)); - ) u++; if ( @@ -2852,7 +2846,6 @@ -1 !== s && ((u = i[s][0]), 0 <= t.call(o, u)); - ) s--; return -1 === s || @@ -2892,7 +2885,6 @@ .length - 1; -1 !== a; - ) (!1 === e @@ -2946,7 +2938,6 @@ .length - 1; -1 !== a; - ) (!e .comments[ @@ -3986,7 +3977,6 @@ t = this.clean(t), s = 0; (this.chunk = t.slice(s)); - ) { r = this.identifierToken() || @@ -4536,7 +4526,6 @@ return n; })().join("#{}"); (p = E.exec(u)); - ) ((s = p[1]), (null === c || @@ -5377,7 +5366,6 @@ (null == (i = this.ends[n]) ? 
void 0 : i.tag) || 0 < t--; - ) n--; return ( @@ -5558,7 +5546,6 @@ n = s[--t], n[0] = "PARAM_END"; (i = s[--t]); - ) switch (i[0]) { case ")": @@ -25711,7 +25698,6 @@ o, ) ); - ) i++; return ( @@ -26488,7 +26474,6 @@ for ( t = this; t !== (t = t.unwrap()); - ) continue; return t; @@ -26767,7 +26752,6 @@ for ( r = this.expressions.length; r--; - ) { ((n = this.expressions[r]), (this.expressions[r] = @@ -26926,8 +26910,8 @@ this, null, null == - (o = - t.referencedVars) + (o = + t.referencedVars) ? [] : o, ), @@ -30954,7 +30938,6 @@ ); }; (t = y[r]); - ) ((c = this.addInitializerExpression( @@ -31327,7 +31310,6 @@ r instanceof Pt && r.isString() ); - ) if (r.hoisted) i++; else { @@ -32965,7 +32947,8 @@ .properties[0] .name : new ct( - s.unwrap().value, + s.unwrap() + .value, )), (f = c.unwrap() instanceof @@ -34754,7 +34737,8 @@ ? r.expressions.unshift( new D( new ft( - this.guard, + this + .guard, ).invert(), new wt( "continue", @@ -34764,7 +34748,8 @@ : this.guard && (r = f.wrap([ new D( - this.guard, + this + .guard, r, ), ]))), @@ -34946,7 +34931,6 @@ for ( t = !0, n = this; n && n.operator; - ) (t && (t = @@ -34960,7 +34944,6 @@ for ( n = this; n && n.operator; - ) ((n.invert = !n.invert), (n.operator = @@ -35671,7 +35654,8 @@ : void 0, this.recovery.unshift( new o( - this.errorVariable, + this + .errorVariable, u, ), )) @@ -36629,7 +36613,8 @@ ? s.expressions.unshift( new D( new ft( - this.guard, + this + .guard, ).invert(), new wt( "continue", @@ -37370,7 +37355,6 @@ for ( var n; !((n = this.columns[t]) || 0 >= t); - ) t--; return n && [n.sourceLine, n.sourceColumn]; @@ -37422,7 +37406,6 @@ (s = this.lines[r]) || 0 >= r ); - ) r--; return s && s.sourceLocation(i); @@ -37544,7 +37527,6 @@ a = 0 > t ? 
1 : 0, f = (_Mathabs(t) << 1) + a; f || !n; - ) ((u = f & s), (f >>= i), From 65733df98145056c9d170653ef5b4d0fdd8b9679 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:31:23 -0500 Subject: [PATCH 05/65] [DAPS-1522] - refactor: foxx, Note Router Logging Improvements (#1796) --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/note_router.js | 265 +++++++++++++-- core/database/foxx/tests/note_router.test.js | 327 +++++++++++++++++++ docs/_static/js/html5shiv-printshiv.min.js | 3 - web/static/ace/worker-coffee.js | 42 +-- 5 files changed, 584 insertions(+), 55 deletions(-) create mode 100644 core/database/foxx/tests/note_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index b4ebeb975..b6db95f97 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") add_test(NAME foxx_version_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version_router:") add_test(NAME foxx_tag_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_tag_router:") add_test(NAME foxx_task_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_task_router:") @@ -55,6 +56,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + 
set_tests_properties(foxx_note_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_version_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_tag_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_query_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/note_router.js b/core/database/foxx/api/note_router.js index 445ed28d1..5eb4ab54d 100644 --- a/core/database/foxx/api/note_router.js +++ b/core/database/foxx/api/note_router.js @@ -7,14 +7,18 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const error = require("./lib/error_codes"); const permissions = require("./lib/permissions"); - +const basePath = "note"; +const logger = require("./lib/logger"); module.exports = router; //==================== ACL API FUNCTIONS router .post("/create", function (req, res) { - console.log("note/create"); + let client = null; + let result = {}; + let doc; + let _key, _rev; try { g_db._executeTransaction({ collections: { @@ -22,9 +26,17 @@ router write: ["d", "n", "note"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); - var id = g_lib.resolveDataCollID(req.queryParams.subject, client), - doc = g_db._document(id); + client = g_lib.getUserFromClientID(req.queryParams.client); + var id = g_lib.resolveDataCollID(req.queryParams.subject, client); + doc = g_db._document(id); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: "Create an annotation on an object " + req.queryParams.subject, + }); if (!permissions.hasAdminPermObject(client, id)) { if ( @@ -87,9 +99,32 @@ router results: [note.new], updates: Object.values(updates), }); + ({ _key, _rev, ...result } = doc); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: 
basePath + "/create", + status: "Success", + description: "Create an annotation on an object " + req.queryParams.subject, + extra: result, + }); }, }); } catch (e) { + if (doc) { + ({ _key, _rev, ...result } = doc); + } + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Failure", + description: "Create an annotation on an object " + req.queryParams.subject, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -108,7 +143,8 @@ router router .post("/update", function (req, res) { - console.log("note/update"); + let client = null; + let result, doc, _key, _rev; try { g_db._executeTransaction({ collections: { @@ -116,7 +152,15 @@ router write: ["d", "n", "note"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: "Update annotation " + req.queryParams.id, + }); if (!req.queryParams.id.startsWith("n/")) throw [ @@ -232,9 +276,32 @@ router results: [note], updates: Object.values(updates), }); + ({ _key, _rev, ...result } = doc); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Success", + description: "Update annotation " + req.queryParams.id, + extra: result, + }); }, }); } catch (e) { + if (doc) { + ({ _key, _rev, ...result } = doc); + } + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Failure", + description: "Update annotation " + req.queryParams.id, + extra: result, + error: e, + }); 
g_lib.handleException(e, res); } }) @@ -253,6 +320,8 @@ router router .post("/comment/edit", function (req, res) { + let client = null; + let note = null; try { g_db._executeTransaction({ collections: { @@ -260,7 +329,19 @@ router write: ["n"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/comment/edit", + status: "Started", + description: + "Edit annotation comment " + + req.queryParams.id + + " Comment ID:" + + req.queryParams.comment_idx, + }); if (!req.queryParams.id.startsWith("n/")) throw [ @@ -274,7 +355,7 @@ router "Annotaion ID '" + req.queryParams.id + "' does not exist.", ]; - var note = g_db.n.document(req.queryParams.id); + note = g_db.n.document(req.queryParams.id); if (req.queryParams.comment_idx >= note.comments.length) throw [error.ERR_INVALID_PARAM, "Comment index out of range."]; @@ -300,9 +381,40 @@ router res.send({ results: [note.new], }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/comment/edit", + status: "Success", + description: + "Edit annotation comment " + + req.queryParams.id + + " Comment ID:" + + req.queryParams.comment_idx, + extra: { + title: note.new.title, + creator: note.new.creator, + comments: note.new.comments[req.queryParams.comment_idx], + }, + }); }, }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/comment/edit", + status: "Failure", + description: + "Edit an annotation comment " + + req.queryParams.id + + " Comment ID:" + + req.queryParams.comment_idx, + extra: note.new, + error: e, + }); g_lib.handleException(e, res); } }) @@ -315,8 +427,17 @@ router 
router .get("/view", function (req, res) { + let client = null; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: "View annotation " + req.queryParams.id, + }); if (!req.queryParams.id.startsWith("n/")) throw [ @@ -369,7 +490,39 @@ router res.send({ results: [note], }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: "View annotation " + req.queryParams.id, + extra: { + title: note?.title, + creator: note?.creator, + lastComment: Array.isArray(note.comments) + ? note.comments[note.comments.length - 1] || null + : null, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: "View annotation " + req.queryParams.id, + extra: { + title: note?.title, + creator: note?.creator, + lastComment: Array.isArray(note.comments) + ? 
note.comments[note.comments.length - 1] || null + : null, + }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -380,11 +533,20 @@ router router .get("/list/by_subject", function (req, res) { + let client = null; + let results = null; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_subject", + status: "Started", + description: "List annotations by subject " + req.queryParams.subject, + }); - var results, - qry, + var qry, id = g_lib.resolveDataCollID(req.queryParams.subject, client); if (!client) { @@ -411,7 +573,26 @@ router res.send({ results: results.toArray(), }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_subject", + status: "Success", + description: "List annotations by subject " + req.queryParams.subject, + extra: { found: results?._countTotal }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_subject", + status: "Failure", + description: "List annotations by subject " + req.queryParams.subject, + extra: { found: results?._countTotal }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -422,7 +603,20 @@ router router .get("/purge", function (req, res) { + let client = null; + let id = null; + const purgedIds = []; try { + client = req.queryParams?.client; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Started", + description: "Purge old closed annotations older than " + req.queryParams.age_sec, + }); + g_db._executeTransaction({ 
collections: { read: ["u", "uuid", "accn"], @@ -432,23 +626,50 @@ router //console.log("note purge, age:", req.queryParams.age_sec ); var t = Date.now() / 1000 - req.queryParams.age_sec; - var id, - notes = g_db._query( - "for i in n filter i.state == " + - g_lib.NOTE_CLOSED + - " && i.ut < " + - t + - " and i.parent_id == null return i._id", - ); + + //maybe id = below + var notes = g_db._query( + "for i in n filter i.state == " + + g_lib.NOTE_CLOSED + + " && i.ut < " + + t + + " and i.parent_id == null return i._id", + ); while (notes.hasNext()) { id = notes.next(); - console.log("purging", id); + purgedIds.push(id); + // This will also delete all dependent annotations g_lib.annotationDelete(id); } }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Success", + description: + "Purge old closed annotations older than " + + req.queryParams.age_sec + + "seconds.", + extra: `Ids of purged notes: ${purgedIds.join(", ")}`, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/purge", + status: "Failure", + description: + "Purge old closed annotations older than " + + req.queryParams.age_sec + + "seconds.", + extra: { last_purged_note: id }, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/note_router.test.js b/core/database/foxx/tests/note_router.test.js new file mode 100644 index 000000000..9aac6dc7d --- /dev/null +++ b/core/database/foxx/tests/note_router.test.js @@ -0,0 +1,327 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = 
module.context; +const { db } = require("@arangodb"); + +const note_base_url = `${baseUrl}/note`; + +describe("unit_note_router: the Foxx microservice note_router /create endpoint", () => { + after(function () { + const collections = ["note", "d", "u"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + const collections = [ + { name: "u", type: "document" }, + { name: "d", type: "document" }, + { name: "note", type: "edge" }, // must be edge + ]; + + collections.forEach(({ name, type }) => { + const col = db._collection(name); + if (col) { + col.truncate(); + } else { + db._create(name, { type: type === "edge" ? 3 : 2 }); + } + }); + }); + + it("should successfully run the search route", () => { + const user = db.u.save({ + _key: "testUser", + _id: "u/testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const data = db.d.save({ + _key: "ID", + _id: "d/ID", + owner: user._id, + }); + + // Prepare the request + const request_string = `${note_base_url}/create?client=${encodeURIComponent( + user._id, + )}&subject=${encodeURIComponent(data._id)}&type=1&title=UnitTestTitle&comment=UnitTestComment`; + + // act + const response = request.post(request_string); + // assert + expect(response.status).to.equal(200); + }); + + it("should successfully update an existing annotation", () => { + // Arrange + const user = db.u.save({ + _key: "testUser", + _id: "u/testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const data = db.d.save({ + _key: "ID", + _id: "d/ID", + owner: user._id, + }); + + // Create a note record (edge + document) + const note_doc = db.n.save({ + type: 1, + state: 0, + title: "OldTitle", + creator: user._id, + comments: [], + }); + + db.note.save({ + _from: data._id, + _to: note_doc._id, + }); + + // Act — call /update + const request_string = 
`${note_base_url}/update?client=${encodeURIComponent(user._id)}&id=${encodeURIComponent(note_doc._id)}&new_state=1&new_title=UpdatedTitle&comment=UpdatedComment`; + + const response = request.post(request_string); + + // Assert + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body.results[0]._id).to.equal(note_doc._id); + expect(body.results[0].state).to.equal(1); + }); + + it("should successfully edit an annotation comment", () => { + // Arrange: create user and data + const user = db.u.save({ + _key: "testUser", + _id: "u/testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const data = db.d.save({ + _key: "ID", + _id: "d/ID", + owner: user._id, + }); + + // Create a note document with one comment + const note_doc = db.n.save({ + type: 1, + state: 0, + title: "Test Note", + creator: user._id, + comments: [ + { + user: user._id, + time: Math.floor(Date.now() / 1000), + comment: "Original Comment", + }, + ], + }); + + db.note.save({ + _from: data._id, + _to: note_doc._id, + }); + + // Act: edit the existing comment + const newComment = "Edited Comment"; + const request_string = `${note_base_url}/comment/edit?client=${encodeURIComponent(user._id)}&id=${encodeURIComponent(note_doc._id)}&comment=${encodeURIComponent(newComment)}&comment_idx=0`; + + const response = request.post(request_string); + + // Assert + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + const updatedNote = body.results[0]; + expect(updatedNote.comments[0].comment).to.equal(newComment); + }); + + it("should successfully view an annotation", () => { + // Arrange: create user and data record + const user = db.u.save({ + _key: "testUser", + _id: "u/testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const data = db.d.save({ + _key: "ID", + _id: "d/ID", + owner: user._id, + }); + + // Create a note linked to the data record + const note_doc = 
db.n.save({ + type: 1, + state: 0, + title: "Viewable Note", + creator: user._id, + comments: [ + { + user: user._id, + time: Math.floor(Date.now() / 1000), + comment: "Initial Comment", + }, + ], + }); + + db.note.save({ + _from: data._id, + _to: note_doc._id, + }); + + // Act: call /note/view + const request_string = `${note_base_url}/view?client=${encodeURIComponent( + user._id, + )}&id=${encodeURIComponent(note_doc._id)}`; + + const response = request.get(request_string); + + // Assert + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body.results).to.be.an("array").that.is.not.empty; + expect(body.results[0]._id).to.equal(note_doc._id); + expect(body.results[0].title).to.equal("Viewable Note"); + }); + + it("should list all annotations for a subject", () => { + // Arrange: create a user and subject document + const user = db.u.save({ + _key: "testUser", + _id: "u/testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const subject = db.d.save({ + _key: "subj1", + _id: "d/subj1", + owner: user._id, + }); + + // Create a few note documents and connect them + const note1 = db.n.save({ + type: 1, + state: 2, // active + title: "Subject Note 1", + creator: user._id, + comments: [ + { + user: user._id, + time: Math.floor(Date.now() / 1000), + comment: "First comment", + }, + ], + }); + + const note2 = db.n.save({ + type: 0, + state: 2, + title: "Subject Note 2", + creator: user._id, + comments: [], + }); + + db.note.save({ _from: subject._id, _to: note1._id }); + db.note.save({ _from: subject._id, _to: note2._id }); + + // Act: call /note/list/by_subject + const request_string = `${note_base_url}/list/by_subject?client=${encodeURIComponent( + user._id, + )}&subject=${encodeURIComponent(subject._id)}`; + + const response = request.get(request_string); + + // Assert + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body.results).to.be.an("array"); 
+ expect(body.results.length).to.equal(2); + expect(body.results.map((r) => r.title)).to.include("Subject Note 1"); + }); + + it("should purge old closed annotations", () => { + // Arrange: create user and subject + const user = db.u.save({ + _key: "purgeUser", + _id: "u/purgeUser", + name: "Purge Tester", + email: "purgetest@example.com", + is_admin: true, + }); + + const subject = db.d.save({ + _key: "purgeSubj", + _id: "d/purgeSubj", + owner: user._id, + }); + + const now = Math.floor(Date.now() / 1000); + + // Create one old closed note (should be deleted) + const oldClosedNote = db.n.save({ + type: 1, + state: g_lib.NOTE_CLOSED, // usually 2 or something similar + title: "Old Closed Note", + creator: user._id, + ut: now - 100000, // 100k seconds old + parent_id: null, + }); + db.note.save({ _from: subject._id, _to: oldClosedNote._id }); + + // Create one recent closed note (should stay) + const recentClosedNote = db.n.save({ + type: 1, + state: g_lib.NOTE_CLOSED, + title: "Recent Closed Note", + creator: user._id, + ut: now, // current time + parent_id: null, + }); + db.note.save({ _from: subject._id, _to: recentClosedNote._id }); + + // Create one open note (should stay) + const openNote = db.n.save({ + type: 1, + state: 0, // open + title: "Open Note", + creator: user._id, + ut: now - 200000, + parent_id: null, + }); + db.note.save({ _from: subject._id, _to: openNote._id }); + + // Act: purge notes older than 50,000 seconds + const request_string = `${note_base_url}/purge?client=${encodeURIComponent( + user._id, + )}&age_sec=50000`; + + const response = request.get(request_string); + + // Assert: response should be OK + expect(response.status).to.equal(204); + }); +}); diff --git a/docs/_static/js/html5shiv-printshiv.min.js b/docs/_static/js/html5shiv-printshiv.min.js index 72406b792..ae10bdff2 100644 --- a/docs/_static/js/html5shiv-printshiv.min.js +++ b/docs/_static/js/html5shiv-printshiv.min.js @@ -88,7 +88,6 @@ f = RegExp("^(?:" + d().join("|") + ")$", 
"i"), g = []; e--; - ) ((b = c[e]), f.test(b.nodeName) && g.push(b.applyElement(l(b)))); return g; @@ -100,7 +99,6 @@ d = c.length, e = a.ownerDocument.createElement(A + ":" + a.nodeName); d--; - ) ((b = c[d]), b.specified && e.setAttribute(b.nodeName, b.nodeValue)); return ((e.style.cssText = a.style.cssText), e); @@ -113,7 +111,6 @@ f = RegExp("(^|[\\s,>+~])(" + d().join("|") + ")(?=[[\\s,>+~#.:]|$)", "gi"), g = "$1" + A + "\\:$2"; e--; - ) ((b = c[e] = c[e].split("}")), (b[b.length - 1] = b[b.length - 1].replace(f, g)), diff --git a/web/static/ace/worker-coffee.js b/web/static/ace/worker-coffee.js index 2692b5bca..7400d29a4 100644 --- a/web/static/ace/worker-coffee.js +++ b/web/static/ace/worker-coffee.js @@ -1646,7 +1646,6 @@ for ( i = this.tokens, n = 0; (r = i[n]); - ) n += t.call(this, r, n, i); return !0; @@ -1668,7 +1667,6 @@ for ( h = this.tokens, o = 0; (c = h[n]); - ) { if ( 0 === o && @@ -1914,7 +1912,6 @@ .generated) && ((c = this.tag(n)), 0 > t.call(v, c)))); - ) (((s = this.tag(n)), 0 <= t.call(a, s)) && @@ -2287,7 +2284,6 @@ (C() && ":" !== H); - ) T() ? g() @@ -2746,7 +2742,6 @@ ? 1 : 0; C(); - ) y(o + P); return w(1); @@ -2813,7 +2808,6 @@ u !== i.length && ((l = i[u][0]), 0 <= t.call(o, l)); - ) u++; if ( @@ -2852,7 +2846,6 @@ -1 !== s && ((u = i[s][0]), 0 <= t.call(o, u)); - ) s--; return -1 === s || @@ -2892,7 +2885,6 @@ .length - 1; -1 !== a; - ) (!1 === e @@ -2946,7 +2938,6 @@ .length - 1; -1 !== a; - ) (!e .comments[ @@ -3986,7 +3977,6 @@ t = this.clean(t), s = 0; (this.chunk = t.slice(s)); - ) { r = this.identifierToken() || @@ -4536,7 +4526,6 @@ return n; })().join("#{}"); (p = E.exec(u)); - ) ((s = p[1]), (null === c || @@ -5377,7 +5366,6 @@ (null == (i = this.ends[n]) ? 
void 0 : i.tag) || 0 < t--; - ) n--; return ( @@ -5558,7 +5546,6 @@ n = s[--t], n[0] = "PARAM_END"; (i = s[--t]); - ) switch (i[0]) { case ")": @@ -25711,7 +25698,6 @@ o, ) ); - ) i++; return ( @@ -26488,7 +26474,6 @@ for ( t = this; t !== (t = t.unwrap()); - ) continue; return t; @@ -26767,7 +26752,6 @@ for ( r = this.expressions.length; r--; - ) { ((n = this.expressions[r]), (this.expressions[r] = @@ -26926,8 +26910,8 @@ this, null, null == - (o = - t.referencedVars) + (o = + t.referencedVars) ? [] : o, ), @@ -30954,7 +30938,6 @@ ); }; (t = y[r]); - ) ((c = this.addInitializerExpression( @@ -31327,7 +31310,6 @@ r instanceof Pt && r.isString() ); - ) if (r.hoisted) i++; else { @@ -32965,7 +32947,8 @@ .properties[0] .name : new ct( - s.unwrap().value, + s.unwrap() + .value, )), (f = c.unwrap() instanceof @@ -34754,7 +34737,8 @@ ? r.expressions.unshift( new D( new ft( - this.guard, + this + .guard, ).invert(), new wt( "continue", @@ -34764,7 +34748,8 @@ : this.guard && (r = f.wrap([ new D( - this.guard, + this + .guard, r, ), ]))), @@ -34946,7 +34931,6 @@ for ( t = !0, n = this; n && n.operator; - ) (t && (t = @@ -34960,7 +34944,6 @@ for ( n = this; n && n.operator; - ) ((n.invert = !n.invert), (n.operator = @@ -35671,7 +35654,8 @@ : void 0, this.recovery.unshift( new o( - this.errorVariable, + this + .errorVariable, u, ), )) @@ -36629,7 +36613,8 @@ ? s.expressions.unshift( new D( new ft( - this.guard, + this + .guard, ).invert(), new wt( "continue", @@ -37370,7 +37355,6 @@ for ( var n; !((n = this.columns[t]) || 0 >= t); - ) t--; return n && [n.sourceLine, n.sourceColumn]; @@ -37422,7 +37406,6 @@ (s = this.lines[r]) || 0 >= r ); - ) r--; return s && s.sourceLocation(i); @@ -37544,7 +37527,6 @@ a = 0 > t ? 
1 : 0, f = (_Mathabs(t) << 1) + a; f || !n; - ) ((u = f & s), (f >>= i), From 5b76791c4e4773695991590e8c4330854e9e6805 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 3 Dec 2025 10:44:50 -0500 Subject: [PATCH 06/65] [DAPS-1774] - continuation of fix (#1798) --- tests/end-to-end/test_api_query.py | 107 ++++++++++++++--------------- 1 file changed, 53 insertions(+), 54 deletions(-) diff --git a/tests/end-to-end/test_api_query.py b/tests/end-to-end/test_api_query.py index 4d740593e..f1de102e1 100755 --- a/tests/end-to-end/test_api_query.py +++ b/tests/end-to-end/test_api_query.py @@ -168,8 +168,10 @@ def setUp(self): print("Created record") print(record) - def test_query_create_delete(self): + print("Response from dataView") + print(self._df_api.dataView(record[0].data[0].id)) + def test_query_create_delete(self): search_query = self._df_api.queryCreate(title="Search for Adamantium", owner="u/" + self._username, coll=["root"], meta="md.creator == 'Lex Luther'") print("Search query create") @@ -188,59 +190,56 @@ def test_query_create_delete(self): self.assertEqual(material, "adamantium") -# self._df_api.queryDelete(search_query[0].id) - -# def tearDown(self): -# -# task_result = self._df_api.dataDelete("adamantium") -# -# status = task_result[0].task[0].status -# count = 0 -# while status < 3: -# if count > 20: -# break -# time.sleep(self._timeout) -# task_result = self._df_api.taskView(task_result[0].task[0].id) -# status = task_result[0].task[0].status -# count = count + 1 -# -# print("Delete record result.") -# print(task_result) -# -# result = self._df_api.repoAllocationDelete( -# repo_id=self._repo_id, subject=self._username -# ) -# -# task_id = result[0].task[0].id -# -# # Check the status of the task -# task_result = self._df_api.taskView(task_id) -# -# # If status is less than 3 it is in the works -# status = task_result[0].task[0].status -# count = 0 -# while status < 3: -# if count > 2: -# print(task_result) -# self.fail( -# "Something went wrong 
task was unable to complete, attempt" -# " to delete an allocation after 3 seconds failed, make sure" -# " all services are running." -# ) -# break -# time.sleep(self._timeout) -# task_result = self._df_api.taskView(task_id) -# status = task_result[0].task[0].status -# count = count + 1 -# -# print("Delete Allocations") -# print(result) -# -# repo_id = self._repo_form["id"] -# if not repo_id.startswith("repo/"): -# repo_id = "repo/" + repo_id -# result = self._df_api.repoDelete(repo_id) -# result = self._df_api.repoList(list_all=True) + self._df_api.queryDelete(search_query[0].id) + + def tearDown(self): + + task_result = self._df_api.dataDelete("adamantium") + + status = task_result[0].task[0].status + count = 0 + while status < 3: + if count > 20: + break + time.sleep(self._timeout) + task_result = self._df_api.taskView(task_result[0].task[0].id) + status = task_result[0].task[0].status + count = count + 1 + + print("Delete record result.") + print(task_result) + + result = self._df_api.repoAllocationDelete( + repo_id=self._repo_id, subject=self._username + ) + + task_id = result[0].task[0].id + # Check the status of the task + task_result = self._df_api.taskView(task_id) + # If status is less than 3 it is in the works + status = task_result[0].task[0].status + count = 0 + while status < 3: + if count > 2: + print(task_result) + self.fail( + "Something went wrong task was unable to complete, attempt" + " to delete an allocation after 3 seconds failed, make sure" + " all services are running." 
+ ) + break + time.sleep(self._timeout) + task_result = self._df_api.taskView(task_id) + status = task_result[0].task[0].status + count = count + 1 + + print("Delete Allocations") + print(result) + repo_id = self._repo_form["id"] + if not repo_id.startswith("repo/"): + repo_id = "repo/" + repo_id + result = self._df_api.repoDelete(repo_id) + result = self._df_api.repoList(list_all=True) if __name__ == "__main__": From 036079bf6519d4867e33d4f2754262e64fe0e112 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Thu, 4 Dec 2025 08:08:00 -0500 Subject: [PATCH 07/65] [DAPS-1774] - continuation of fix (#1798) --- tests/end-to-end/test_api_query.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tests/end-to-end/test_api_query.py b/tests/end-to-end/test_api_query.py index f1de102e1..cf1894a09 100755 --- a/tests/end-to-end/test_api_query.py +++ b/tests/end-to-end/test_api_query.py @@ -177,14 +177,20 @@ def test_query_create_delete(self): print("Search query create") print(search_query) - query_result = self._df_api.queryExec(search_query[0].id) - print("Query result") - print(query_result) - + count = 0 material = "" - for model in query_result[0].item: - if model.alias.startswith("adamantium"): - material = model.alias + while material == "": + + query_result = self._df_api.queryExec(search_query[0].id) + print("Query result") + print(query_result) + if count > 3: + break + + for model in query_result[0].item: + if model.alias.startswith("adamantium"): + material = model.alias + time.sleep(self._timeout) print(f"Query found {material}") From 4e3f0fcdfa04cbfb3e4361e11b5b8cc652d644da Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Thu, 4 Dec 2025 08:09:47 -0500 Subject: [PATCH 08/65] [DAPS-1522] refactor: foxx, Metrics Router Logging Improvements (#1797) --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/metrics_router.js | 130 ++++++++++- 
.../foxx/tests/metrics_router.test.js | 217 ++++++++++++++++++ 3 files changed, 344 insertions(+), 5 deletions(-) create mode 100644 core/database/foxx/tests/metrics_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index b6db95f97..73526ba43 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -34,6 +34,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_task_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_task_router:") add_test(NAME foxx_authz_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_authz_router:") add_test(NAME foxx_query_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_query_router:") + add_test(NAME foxx_metrics_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_metrics_router:") add_test(NAME foxx_unit_user_token COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_token:") add_test(NAME foxx_unit_user_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_model:") add_test(NAME foxx_unit_globus_collection_model COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_globus_collection_model:") @@ -56,6 +57,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_metrics_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_note_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_version_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_tag_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/metrics_router.js b/core/database/foxx/api/metrics_router.js index 0345e1188..66c762436 100644 --- a/core/database/foxx/api/metrics_router.js +++ 
b/core/database/foxx/api/metrics_router.js @@ -5,12 +5,22 @@ const router = createRouter(); const g_db = require("@arangodb").db; const g_lib = require("./support"); const joi = require("joi"); +const logger = require("./lib/logger"); module.exports = router; +const basePath = "metrics"; router .post("/msg_count/update", function (req, res) { try { + logger.logRequestStarted({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/msg_count/update", + status: "Started", + description: "Update message metrics", + }); var i, u, ts = req.body.timestamp, @@ -33,7 +43,26 @@ router }; g_db.metrics.save(obj); } + logger.logRequestSuccess({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/msg_count/update", + status: "Success", + description: "Update message metrics", + extra: obj, + }); } catch (e) { + logger.logRequestFailure({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/msg_count/update", + status: "Failure", + description: "Update message metrics", + extra: obj, + error: e, + }); g_lib.handleException(e, res); } }) @@ -43,7 +72,18 @@ router router .get("/msg_count", function (req, res) { + let client = null; + let result = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg_count", + status: "Started", + description: "Grab all message metrics", + }); var par = { now: Date.now() / 1000, since: 60 * (req.queryParams.since ? 
req.queryParams.since : 60), @@ -61,9 +101,9 @@ router } var qry = "for i in metrics filter " + filter + " sort i.timestamp return i", - result = g_db._query(qry, par).toArray(), r; + result = g_db._query(qry, par).toArray(); for (var i in result) { r = result[i]; delete r._rev; @@ -71,7 +111,26 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg_count", + status: "Success", + description: "Grab all message metrics", + extra: { count_msg_types: result.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg_count", + status: "Failure", + description: "Grab all message metrics", + extra: { count_msg_types: result.length }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -82,14 +141,28 @@ router "Return since last specified minutes ago (default 60)", ) .queryParam("uid", joi.string().optional(), "User ID (default none)") - .summary("Update message metrics.") - .description("Update message metrics."); + .summary("Grab all message metrics.") + .description("Grab all message metrics."); router .get("/users/active", function (req, res) { + let client = null; + let cnt = null; try { - var cnt = {}, - u, + client = req.queryParams.client + ? 
g_lib.getUserFromClientID(req.queryParams.client) + : null; + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/users/active", + status: "Started", + description: "Get recently active users from metrics", + }); + + cnt = {}; + var u, r, qryres = g_db ._query( @@ -111,7 +184,26 @@ router } res.json(cnt); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/users/active", + status: "Success", + description: "Get recently active users from metrics", + extra: { total_active_users: Object.keys(cnt).length }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/users/active", + status: "Failure", + description: "Get recently active users from metrics", + extra: { total_active_users: Object.keys(cnt).length }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -126,6 +218,15 @@ router router .post("/purge", function (req, res) { try { + logger.logRequestStarted({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/purge", + status: "Started", + description: "Purge older metrics", + }); + g_db.metrics.save({ timestamp: Math.floor(Date.now() / 1000), type: "purge", @@ -135,7 +236,26 @@ router g_db._query("for i in metrics filter i.timestamp < @ts remove i in metrics", { ts: req.queryParams.timestamp, }); + logger.logRequestSuccess({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/purge", + status: "Success", + description: "Purge older metrics", + extra: "undefined", + }); } catch (e) { + logger.logRequestFailure({ + client: "undefined", + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + 
"/purge", + status: "Failure", + description: "Purge older metrics", + extra: "undefined", + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/metrics_router.test.js b/core/database/foxx/tests/metrics_router.test.js new file mode 100644 index 000000000..7df39b4a3 --- /dev/null +++ b/core/database/foxx/tests/metrics_router.test.js @@ -0,0 +1,217 @@ +"use strict"; + +const g_lib = require("../api/support"); +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const metrics_base_url = `${baseUrl}/metrics`; + +describe("unit_metrics_router: /users/active endpoint", () => { + after(function () { + const collections = ["metrics", "u"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + beforeEach(() => { + const collections = ["metrics", "u"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); + } else { + db._create(name); + } + }); + }); + + it("should return active users within the default 15 minutes", () => { + const now = Math.floor(Date.now() / 1000); + + db.metrics.save([ + { + _key: "m1", + type: "msgcnt_user", + uid: "u/fakeUser", + total: 5, + timestamp: now - 60 * 5, // 5 minutes ago + }, + { + _key: "m2", + type: "msgcnt_user", + uid: "u/otherUser", + total: 10, + timestamp: now - 60 * 20, // 20 minutes ago + }, + ]); + + const request_string = `${metrics_base_url}/users/active`; + const response = request.get(request_string); + + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + // u/fakeUser should appear, u/otherUser should not + expect(body).to.have.property("u/fakeUser"); + expect(body["u/fakeUser"]).to.equal(5); + expect(body).to.not.have.property("u/otherUser"); + }); + + it("should respect the 'since' query parameter", () => { + const now = Math.floor(Date.now() / 
1000); + + db.metrics.save([ + { + _key: "m3", + type: "msgcnt_user", + uid: "u/fakeUser", + total: 7, + timestamp: now - 60 * 30, // 30 minutes ago + }, + ]); + + // since=45 → include 30-min-old record + let response = request.get(`${metrics_base_url}/users/active?since=45`); + expect(response.status).to.equal(200); + let body = JSON.parse(response.body); + expect(body).to.have.property("u/fakeUser"); + + // since=15 → exclude 30-min-old record + response = request.get(`${metrics_base_url}/users/active?since=15`); + expect(response.status).to.equal(200); + body = JSON.parse(response.body); + expect(body).to.not.have.property("u/fakeUser"); + }); + + it("should return an empty object if no users are active", () => { + const response = request.get(`${metrics_base_url}/users/active`); + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body).to.deep.equal({}); + }); + + it("POST /msg_count/update should succeed and write metrics", () => { + // create user explicitly + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake", + email: "fake@example.com", + is_admin: true, + }); + + const payload = { + timestamp: Math.floor(Date.now() / 1000), + total: 99, + uids: { + a: { tot: 5, msg: "hello" }, + b: { tot: 7, msg: "yo" }, + }, + }; + + const res = request.post(`${metrics_base_url}/msg_count/update?client=u/fakeUser`, { + body: payload, + json: true, + }); + + expect(res.status).to.equal(204); + + const docs = db.metrics.toArray(); + expect(docs.length).to.equal(3); // total + 2 users + }); + + it("GET /msg_count should return items within default 60 minutes", () => { + // create user explicitly + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake", + email: "fake@example.com", + is_admin: true, + }); + + const now = Math.floor(Date.now() / 1000); + + // recent item (should return) + db.metrics.save({ + timestamp: now, + type: "msgcnt_total", + total: 1, + }); + + // old item (should NOT return) + 
db.metrics.save({ + timestamp: now - 60 * 60 * 2, // older than 60 min + type: "msgcnt_total", + total: 999, + }); + + const res = request.get(`${metrics_base_url}/msg_count?client=u/fakeUser`); + + expect(res.status).to.equal(200); + + const arr = JSON.parse(res.body); + expect(arr.length).to.equal(1); + expect(arr[0].total).to.equal(1); + }); + + it("GET /msg_count should filter by type and uid", () => { + // create user explicitly + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake", + email: "fake@example.com", + is_admin: true, + }); + + const ts = Math.floor(Date.now() / 1000); + + db.metrics.save({ + timestamp: ts, + type: "msgcnt_user", + uid: "u1", + total: 10, + }); + + db.metrics.save({ + timestamp: ts, + type: "msgcnt_user", + uid: "u2", + total: 20, + }); + + const res = request.get( + `${metrics_base_url}/msg_count?client=u/fakeUser&type=msgcnt_user&uid=u2`, + ); + + expect(res.status).to.equal(200); + + const arr = JSON.parse(res.body); + expect(arr.length).to.equal(1); + expect(arr[0].uid).to.equal("u2"); + }); + + it("POST /purge should remove metrics older than timestamp", () => { + const now = Math.floor(Date.now() / 1000); + + db.metrics.save([ + { timestamp: now - 1000, type: "msgcnt_total", total: 1 }, // should be removed + { timestamp: now, type: "msgcnt_total", total: 2 }, // should stay + ]); + + const ts = now - 500; + const res = request.post(`${metrics_base_url}/purge?timestamp=${ts}`); + + expect(res.status).to.equal(204); + + const docs = db.metrics.toArray(); + //Equals 2 due to writing the purge doc + expect(docs.length).to.equal(2); + expect(docs[0].total).to.equal(2); + }); +}); From 2c6cde2880e3a2202bc815e5d97645bc9266274b Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Thu, 4 Dec 2025 08:26:55 -0500 Subject: [PATCH 09/65] [DAPS-1522] - refactor: foxx, Group Router Logging Improvements (#1795) --- core/database/CMakeLists.txt | 2 + 
core/database/foxx/api/group_router.js | 196 +++++++++++++++++- core/database/foxx/tests/group_router.test.js | 142 +++++++++++++ 3 files changed, 332 insertions(+), 8 deletions(-) create mode 100644 core/database/foxx/tests/group_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 73526ba43..c9bf90af5 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_group_router:") add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") add_test(NAME foxx_version_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version_router:") add_test(NAME foxx_tag_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_tag_router:") @@ -57,6 +58,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_group_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_metrics_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_note_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_version_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/group_router.js b/core/database/foxx/api/group_router.js index f2b0960a6..6a49bf3b6 100644 --- a/core/database/foxx/api/group_router.js +++ 
b/core/database/foxx/api/group_router.js @@ -9,14 +9,26 @@ const permissions = require("./lib/permissions"); const g_db = require("@arangodb").db; const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); const g_lib = require("./support"); - +const logger = require("./lib/logger"); +const basePath = "grp"; module.exports = router; //========== GROUP API FUNCTIONS ========== router .get("/create", function (req, res) { + let client = null; + let logExtra = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Started", + description: "Create a new group", + }); var result = []; g_db._executeTransaction({ @@ -25,7 +37,7 @@ router write: ["g", "owner", "member"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var uid; if (req.queryParams.proj) { @@ -87,9 +99,38 @@ router result.push(group.new); }, }); + logExtra = { + _id: result[0]._id, + uid: result[0].uid, + gid: result[0].gid, + title: (result[0].title || "").slice(0, 10), + truncated_members: Array.isArray(result[0].members) + ? 
result[0].members.slice(-5) + : [], + }; res.send(result); + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Success", + description: "Create a new group", + extra: logExtra, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Failure", + description: "Create a new group", + extra: logExtra, + error: e, + }); g_lib.handleException(e, res); } }) @@ -104,7 +145,18 @@ router router .get("/update", function (req, res) { + let client = null; + let logExtra = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: "Updates an existing group. GID:" + req.queryParams.gid, + }); var result = []; g_db._executeTransaction({ @@ -113,7 +165,7 @@ router write: ["g", "owner", "member"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var group; if (req.queryParams.proj) { @@ -204,9 +256,37 @@ router result.push(group); }, }); + logExtra = { + title: (result[0].title || "").slice(0, 10), + description: (result[0].desc || "").slice(0, 10), + truncated_members: Array.isArray(result[0].members) + ? result[0].members.slice(-5) + : [], + }; res.send(result); + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: "Updates an existing group. 
GID:" + req.queryParams.gid, + extra: logExtra, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Failure", + description: "Updates an existing group. GID:" + req.queryParams.gid, + extra: logExtra, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -230,15 +310,26 @@ router router .get("/delete", function (req, res) { + let client = null; + let group = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Started", + description: "Deletes an existing group. GID:" + req.queryParams.gid, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "owner", "admin"], write: ["g", "owner", "member", "acl"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); - var group; + client = g_lib.getUserFromClientID(req.queryParams.client); if (req.queryParams.proj) { var uid = req.queryParams.proj; @@ -271,7 +362,26 @@ router g_graph.g.remove(group._id); }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Success", + description: `Deletes an existing group: ${group?._id}, GID: ${req.queryParams.gid}`, + extra: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Failure", + description: `Deletes an existing group: ${group?._id}, GID: ${req.queryParams.gid}`, + extra: "N/A", + error: e, + }); g_lib.handleException(e, res); } }) @@ -283,8 +393,20 @@ router router .get("/list", function (req, res) { + let client = null; + let groups = 
null; + let logExtra = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Started", + description: "List groups", + }); + var owner_id; if (req.queryParams.proj) { @@ -295,7 +417,7 @@ router owner_id = client._id; } - var groups = g_db + groups = g_db ._query( "for v in 1..1 inbound @client owner filter IS_SAME_COLLECTION('g', v) return { uid: v.uid, gid: v.gid, title: v.title }", { @@ -304,8 +426,30 @@ router ) .toArray(); + logExtra = { + totalGroups: groups.length, + }; res.send(groups); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Success", + description: "List groups", + extra: logExtra, + }); } catch (e) { + res.send(groups); + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Failure", + description: "List groups", + extra: logExtra, + }); g_lib.handleException(e, res); } }) @@ -316,8 +460,19 @@ router router .get("/view", function (req, res) { + let client = null; + let logExtra = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View group details. 
GID: ${req.queryParams.gid}`, + }); + var group; if (req.queryParams.proj) { @@ -351,8 +506,33 @@ router group: group._id, }) .toArray(); + logExtra = { + title: (result.title || "").slice(0, 10), + description: (result.desc || "").slice(0, 10), + truncated_members: Array.isArray(result.members) ? result.members.slice(-5) : [], + }; res.send([result]); + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Successful", + description: `View group details. GID: ${req.queryParams.gid}`, + extra: logExtra, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View group details. GID: ${req.queryParams.gid}`, + extra: logExtra, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/group_router.test.js b/core/database/foxx/tests/group_router.test.js new file mode 100644 index 000000000..bd3441df2 --- /dev/null +++ b/core/database/foxx/tests/group_router.test.js @@ -0,0 +1,142 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { db } = require("@arangodb"); +const { baseUrl } = module.context; + +const group_base_url = `${baseUrl}/grp`; + +describe("unit_group_router: test group router endpoints", () => { + beforeEach(() => { + const collections = ["u", "g", "owner", "member", "p", "uuid", "accn", "admin"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + else db._create(name); + }); + }); + + after(() => { + const collections = ["u", "g", "owner", "member", "p", "uuid", "accn", "admin"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + // 
==================================================================== + // /create + // ==================================================================== + + it("should create a new group", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name_first: "Fake", + name_last: "User", + is_admin: true, + email: "fake@user.com", + }); + + const url = `${group_base_url}/create?client=u/fakeUser&gid=testgroup&title=Test+Group`; + + const response = request.get(url, { + headers: { "x-correlation-id": "test-correlation-id" }, + }); + + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + + expect(body).to.be.an("array"); + expect(body[0]).to.have.property("gid", "testgroup"); + expect(body[0]).to.have.property("title", "Test Group"); + expect(body[0]).to.have.property("members"); + expect(body[0].members).to.be.an("array").that.is.empty; + }); + + // ==================================================================== + // /list + // ==================================================================== + + it("should list groups for the user", () => { + db.u.save({ _key: "fakeUser", _id: "u/fakeUser", is_admin: true }); + + request.get(`${group_base_url}/create?client=u/fakeUser&gid=a&title=A`); + request.get(`${group_base_url}/create?client=u/fakeUser&gid=b&title=B`); + + const response = request.get(`${group_base_url}/list?client=u/fakeUser`); + expect(response.status).to.equal(200); + + const list = JSON.parse(response.body); + + expect(list.length).to.equal(2); + expect(list.map((g) => g.gid)).to.have.members(["a", "b"]); + }); + + // ==================================================================== + // /delete + // ==================================================================== + + it("should delete a group", () => { + db.u.save({ _key: "fakeUser", _id: "u/fakeUser", is_admin: true }); + + request.get(`${group_base_url}/create?client=u/fakeUser&gid=testgroup&title=A`); + + const delUrl = 
`${group_base_url}/delete?client=u/fakeUser&gid=testgroup&title=A`; + const response = request.get(delUrl); + + expect(response.status).to.equal(204); + + // Now verify it is actually deleted + const list = request.get(`${group_base_url}/list?client=u/fakeUser`); + const groups = JSON.parse(list.body); + + expect(groups).to.be.an("array").that.is.empty; + }); + // ==================================================================== + // /view + // ==================================================================== + + it("should view an existing group", () => { + db.u.save({ _key: "fakeUser", _id: "u/fakeUser", is_admin: true }); + + request.get(`${group_base_url}/create?client=u/fakeUser&gid=viewtest&title=Viewer`); + + const response = request.get(`${group_base_url}/view?client=u/fakeUser&gid=viewtest`); + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + expect(body[0]).to.include({ + gid: "viewtest", + title: "Viewer", + }); + }); + + // ==================================================================== + // /update + // ==================================================================== + it("should update an existing group", () => { + db.u.save({ _key: "fakeUser", _id: "u/fakeUser", is_admin: true }); + + db.g.save({ + uid: "u/fakeUser", + gid: "updateMe", + title: "OldTitle", + desc: "Old description", + }); + // Create + request.get(`${group_base_url}/create?client=u/fakeUser&gid=updateMe&title=OldTitle`); + + // Update title via endpoint + const response = request.get( + `${group_base_url}/update?client=u/fakeUser&gid=updateMe&title=NewTitle`, + ); + + expect(response.status).to.equal(200); + + const updated = JSON.parse(response.body)[0]; + expect(updated.title).to.equal("NewTitle"); + }); +}); From d05f83bd86a532408cc9a0be0f6b49db50a28334 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Thu, 4 Dec 2025 10:40:35 -0500 Subject: [PATCH 10/65] [DAPS-1522] - foxx, 
Authz Router Logging Improvements (#1788) --- core/database/foxx/api/authz_router.js | 162 +++++++++++++----- core/database/foxx/tests/authz_router.test.js | 54 ++++++ 2 files changed, 171 insertions(+), 45 deletions(-) diff --git a/core/database/foxx/api/authz_router.js b/core/database/foxx/api/authz_router.js index 162585499..7600669d5 100644 --- a/core/database/foxx/api/authz_router.js +++ b/core/database/foxx/api/authz_router.js @@ -8,23 +8,25 @@ const g_lib = require("./support"); const error = require("./lib/error_codes"); const permissions = require("./lib/permissions"); const authzModule = require("./authz"); +const logger = require("./lib/logger"); const { Repo, PathType } = require("./repo"); - +const basePath = "authz"; module.exports = router; router .get("/gridftp", function (req, res) { + let client = null; + let description = `Check authorization to ${req.queryParams.act} ${req.queryParams.file} on ${req.queryParams.repo} `; try { - console.log( - "/gridftp start authz client", - req.queryParams.client, - "repo", - req.queryParams.repo, - "file", - req.queryParams.file, - "act", - req.queryParams.act, - ); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/gridftp", + status: "Started", + description: description, + }); // Client will contain the following information // @@ -39,17 +41,8 @@ router // "max_sav_qry" : 20, // : // "email" : "bobjones@gmail.com" - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); if (!client) { - console.log( - "AUTHZ act: " + - req.queryParams.act + - " client: " + - +req.queryParams.client + - " path " + - req.queryParams.file + - " FAILED", - ); throw [error.ERR_PERM_DENIED, "Unknown client: " + req.queryParams.client]; } let repo = new Repo(req.queryParams.repo); @@ 
-57,15 +50,6 @@ router // If the provided path is not within the repo throw an error if (path_type === PathType.UNKNOWN) { - console.log( - "AUTHZ act: " + - req.queryParams.act + - " client: " + - client._id + - " path " + - req.queryParams.file + - " FAILED", - ); throw [ error.ERR_PERM_DENIED, "Unknown path, or path is not consistent with supported repository folder hierarchy: " + @@ -83,16 +67,33 @@ router } else { throw [error.ERR_INVALID_PARAM, "Invalid gridFTP action: ", req.queryParams.act]; } - console.log( - "AUTHZ act: " + - req.queryParams.act + - " client: " + - client._id + - " path " + - req.queryParams.file + - " SUCCESS", - ); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/gridftp", + status: "Success", + description: description, + extra: { + id: client?._id, + is_admin: client?.is_admin, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/gridftp", + status: "Failure", + description: description, + extra: { + id: client?._id, + is_admin: client?.is_admin, + }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -113,12 +114,23 @@ router router .get("/perm/check", function (req, res) { + let client = null; + let result = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/check", + status: "Started", + description: "Checks client permissions for object", + }); + var perms = req.queryParams.perms ? 
req.queryParams.perms : permissions.PERM_ALL; - var obj, - result = true, - id = g_lib.resolveID(req.queryParams.id, client), + var obj; + result = true; + var id = g_lib.resolveID(req.queryParams.id, client), ty = id[0]; if (id[1] != "/") { @@ -172,7 +184,34 @@ router res.send({ granted: result, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/check", + status: "Success", + description: + "Checks client permissions for object. OBJ ID:" + + req.queryParams.id + + ", Permissions: " + + req.queryParams.perms, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/check", + status: "Failure", + description: + "Checks client permissions for object. OBJ ID:" + + req.queryParams.id + + ", Permissions: " + + req.queryParams.perms, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -184,9 +223,21 @@ router router .get("/perm/get", function (req, res) { + let client = null; + let result = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); - var result = req.queryParams.perms ? req.queryParams.perms : permissions.PERM_ALL; + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/get", + status: "Started", + description: + "Gets client permissions for object. Permissions:" + req.queryParams.perms, + }); + + result = req.queryParams.perms ? 
req.queryParams.perms : permissions.PERM_ALL; var obj, id = g_lib.resolveID(req.queryParams.id, client), ty = id[0]; @@ -220,7 +271,28 @@ router res.send({ granted: result, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/get", + status: "Success", + description: + "Gets client permissions for object. Permissions:" + req.queryParams.perms, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/perm/get", + status: "Failure", + description: + "Gets client permissions for object. Permissions:" + req.queryParams.perms, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/authz_router.test.js b/core/database/foxx/tests/authz_router.test.js index 134abfaef..ed074c053 100644 --- a/core/database/foxx/tests/authz_router.test.js +++ b/core/database/foxx/tests/authz_router.test.js @@ -335,4 +335,58 @@ describe("unit_authz_router: the Foxx microservice authz_router", () => { // assert expect(response.status).to.equal(204); }); + // + // ===== PERM CHECK TESTS ===== + // + it("unit_authz_router: perm/check should return granted=true for admin user on owned record", () => { + defaultWorkingSetup(); + + const request_string = + `${authz_base_url}/perm/check?client=` + + james_uuid + + `&id=` + + encodeURIComponent(record_id) + + `&perms=` + + permissions.PERM_ALL; + + const response = request.get(request_string); + + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body).to.have.property("granted", true); + }); + // + // ===== PERM GET TESTS ===== + // + it("unit_authz_router: perm/get should return permission bits for admin user on record", () => { + defaultWorkingSetup(); + + const request_string = + `${authz_base_url}/perm/get?client=` + + 
james_uuid + + `&id=` + + encodeURIComponent(record_id); + + const response = request.get(request_string); + + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + expect(body).to.have.property("granted"); + expect(body.granted).to.be.a("number"); + }); + + it("unit_authz_router: perm/get should fail with invalid id", () => { + defaultWorkingSetup(); + + const request_string = + `${authz_base_url}/perm/get?client=` + + james_uuid + + `&id=` + + encodeURIComponent("x/invalid") + + `&perms=` + + permissions.PERM_ALL; + + const response = request.get(request_string); + expect(response.status).to.equal(400); + }); }); From 894510d80a58ef13b7a9046a7e7d0d2c07b4b7fa Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Fri, 5 Dec 2025 12:28:00 -0500 Subject: [PATCH 11/65] release: version 4.0.1 (#1807) --- cmake/Version.cmake | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/cmake/Version.cmake b/cmake/Version.cmake index 262d3deb7..6e5cdc7c5 100644 --- a/cmake/Version.cmake +++ b/cmake/Version.cmake @@ -1,21 +1,21 @@ set(DATAFED_RELEASE_YEAR 2025) -set(DATAFED_RELEASE_MONTH 11) -set(DATAFED_RELEASE_DAY 17) -set(DATAFED_RELEASE_HOUR 14) +set(DATAFED_RELEASE_MONTH 12) +set(DATAFED_RELEASE_DAY 9) +set(DATAFED_RELEASE_HOUR 10) set(DATAFED_RELEASE_MINUTE 0) -set(DATAFED_COMMON_LIB_MAJOR 1) +set(DATAFED_COMMON_LIB_MAJOR 2) set(DATAFED_COMMON_LIB_MINOR 0) -set(DATAFED_COMMON_LIB_PATCH 3) +set(DATAFED_COMMON_LIB_PATCH 0) -set(DATAFED_COMMON_PROTOCOL_API_MAJOR 1) -set(DATAFED_COMMON_PROTOCOL_API_MINOR 2) +set(DATAFED_COMMON_PROTOCOL_API_MAJOR 2) +set(DATAFED_COMMON_PROTOCOL_API_MINOR 0) set(DATAFED_COMMON_PROTOCOL_API_PATCH 0) -set(DATAFED_CORE_MAJOR 1) -set(DATAFED_CORE_MINOR 1) +set(DATAFED_CORE_MAJOR 2) +set(DATAFED_CORE_MINOR 0) set(DATAFED_CORE_PATCH 0) set(DATAFED_FOXX_MAJOR 1) @@ -26,20 +26,20 @@ set(DATAFED_FOXX_API_MAJOR 1) set(DATAFED_FOXX_API_MINOR 2) set(DATAFED_FOXX_API_PATCH 0) 
-set(DATAFED_WEB_MAJOR 1) -set(DATAFED_WEB_MINOR 1) -set(DATAFED_WEB_PATCH 1) +set(DATAFED_WEB_MAJOR 2) +set(DATAFED_WEB_MINOR 0) +set(DATAFED_WEB_PATCH 0) -set(DATAFED_REPO_MAJOR 1) +set(DATAFED_REPO_MAJOR 2) set(DATAFED_REPO_MINOR 0) -set(DATAFED_REPO_PATCH 1) +set(DATAFED_REPO_PATCH 0) -set(DATAFED_AUTHZ_MAJOR 1) +set(DATAFED_AUTHZ_MAJOR 2) set(DATAFED_AUTHZ_MINOR 0) -set(DATAFED_AUTHZ_PATCH 2) +set(DATAFED_AUTHZ_PATCH 0) -set(DATAFED_PYTHON_CLIENT_MAJOR 3) -set(DATAFED_PYTHON_CLIENT_MINOR 1) +set(DATAFED_PYTHON_CLIENT_MAJOR 4) +set(DATAFED_PYTHON_CLIENT_MINOR 0) set(DATAFED_PYTHON_CLIENT_PATCH 0) set(DATAFED_PYTHON_CLIENT_RELEASE_TYPE "") set(DATAFED_PYTHON_CLIENT_PRE_RELEASE_IDENTIFER "") From dde50e69cc2cac61850988f9331bddea583c9b5f Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Sun, 7 Dec 2025 23:01:20 -0500 Subject: [PATCH 12/65] refactor: only print subset of user properties. (#1804) * refactor: only print subset of user properties. * chore: Auto-format JavaScript files with Prettier --- core/database/foxx/api/user_router.js | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 5e233d1f5..189ff5b00 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -316,6 +316,7 @@ router .get("/update", function (req, res) { let client = null; let result = null; + let extra_log_info = null; try { g_db._executeTransaction({ collections: { @@ -391,6 +392,15 @@ router delete user.new.refresh; result = [user.new]; + + const { is_admin, max_coll, max_proj, max_sav_qry } = user.new; + + extra_log_info = { + is_admin, + max_coll, + max_proj, + max_sav_qry, + }; }, }); res.send(result); @@ -401,7 +411,7 @@ router routePath: basePath + "/update", status: "Success", description: "Update user information", - extra: result, + extra: extra_log_info, }); } catch (e) { logger.logRequestFailure({ @@ -411,7 +421,7 @@ router 
routePath: basePath + "/update", status: "Failure", description: "Update user information", - extra: result, + extra: extra_log_info, error: e, }); g_lib.handleException(e, res); From 7b76aa37a95b46b0501d09e090e9fa316743bf55 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Tue, 9 Dec 2025 08:41:25 -0500 Subject: [PATCH 13/65] [DAPS-1806] - refactor: foxx, user tokens expiring route. (#1806) --- core/database/foxx/api/user_router.js | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 189ff5b00..23dabdd8e 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -1158,43 +1158,44 @@ router router .get("/token/get/expiring", function (req, res) { - let user = null; - let result = null; + let extra_log_info = []; + const desc = `User access tokens expiring in ${req.queryParams.expires_in} seconds`; try { logger.logRequestStarted({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", status: "Started", - description: "Getting expiring user access token", + description: desc, }); - results = g_db._query( + const results = g_db._query( "for i in u filter i.expiration != Null && i.expiration < @exp return {id:i._id,access:i.access,refresh:i.refresh,expiration:i.expiration}", { exp: Math.floor(Date.now() / 1000) + req.queryParams.expires_in, }, ); res.send(results); + extra_log_info = results.toArray(); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", status: "Success", - description: "Getting expiring user access token", - extra: results, + description: desc, + extra: { expiring_token_count: extra_log_info.length }, }); } catch (e) { 
logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", status: "Failure", - description: "Getting expiring user access token", - extra: result, + description: desc, + extra: { expiring_token_count: extra_log_info.length }, error: e, }); g_lib.handleException(e, res); From 27237674842ec2274a38d9c665709dfe7a14a57f Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 10 Dec 2025 08:37:01 -0500 Subject: [PATCH 14/65] [DAPS-1522] - refactor: foxx, topic router logging improvements (#1801) Co-authored-by: Joshua S Brown --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/topic_router.js | 117 +++++++++++++- core/database/foxx/tests/topic_router.test.js | 143 ++++++++++++++++++ 3 files changed, 259 insertions(+), 3 deletions(-) create mode 100644 core/database/foxx/tests/topic_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index c9bf90af5..af3b57f3e 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -31,6 +31,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_group_router:") add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") add_test(NAME foxx_version_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version_router:") + add_test(NAME foxx_topic_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_topic_router:") add_test(NAME foxx_tag_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_tag_router:") add_test(NAME foxx_task_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_task_router:") add_test(NAME foxx_authz_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t 
"unit_authz_router:") @@ -58,6 +59,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_topic_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_group_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_metrics_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_note_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/topic_router.js b/core/database/foxx/api/topic_router.js index 38bc07866..b5b531505 100644 --- a/core/database/foxx/api/topic_router.js +++ b/core/database/foxx/api/topic_router.js @@ -7,17 +7,32 @@ const error = require("./lib/error_codes"); const g_db = require("@arangodb").db; const g_lib = require("./support"); +const logger = require("./lib/logger"); +const basePath = "topic"; module.exports = router; //==================== TOPIC API FUNCTIONS router .get("/list/topics", function (req, res) { + let client = req.queryParams.client + ? g_lib.getUserFromClientID(req.queryParams.client) + : undefined; + let result = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/topics", + status: "Started", + description: "List topics", + }); + var qry, par = {}, - result, off = 0, cnt = 50; @@ -58,7 +73,30 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/topics", + status: "Success", + description: "List topics", + extra: { + topicCount: Array.isArray(result) ? 
result.length : undefined, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/topics", + status: "Failure", + description: "List topics", + extra: { + topicCount: Array.isArray(result) ? result.length : undefined, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -71,38 +109,92 @@ router router .get("/view", function (req, res) { + let client = req.queryParams.client + ? g_lib.getUserFromClientID(req.queryParams.client) + : undefined; + let topic_extra = undefined; try { + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View topic. ID: ${req.queryParams.id}`, + }); + if (!g_db.t.exists(req.queryParams.id)) throw [error.ERR_NOT_FOUND, "Topic, " + req.queryParams.id + ", not found"]; var topic = g_db.t.document(req.queryParams.id); res.send([topic]); + + topic_extra = { + title: topic.title, + creator: topic.creator, + coll_cnt: topic.coll_cnt, + }; + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View topic. ID: ${req.queryParams.id}`, + extra: topic_extra, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View topic. 
ID: ${req.queryParams.id}`, + extra: topic_extra, + error: e, + }); g_lib.handleException(e, res); } }) .queryParam("client", joi.string().optional(), "Client ID") - .queryParam("id", joi.string().optional(), "ID of topic to view") + .queryParam("id", joi.string().required(), "ID of topic to view") .summary("View topic") .description("View a topic."); router .get("/search", function (req, res) { + let client = req.queryParams.client + ? g_lib.getUserFromClientID(req.queryParams.client) + : undefined; + let result = null; + const phrase = req.queryParams.phrase; + const shortPhrase = phrase.length > 10 ? phrase.slice(0, 10) + "..." : phrase; try { + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Started", + description: `Search topics. Search Phrase: ${shortPhrase}`, + }); + var tokens = req.queryParams.phrase.match(/(?:[^\s"]+|"[^"]*")+/g), qry = "for i in topicview search analyzer((", params = {}, i, p, qry_res, - result = [], item, it, topic, path, op = false; + result = []; if (tokens.length == 0) throw [error.ERR_INVALID_PARAM, "Invalid topic search phrase."]; it = 0; @@ -154,7 +246,26 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Success", + description: `Search topics. Search Phrase: ${shortPhrase}`, + extra: { numOfResults: result.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Failure", + description: `Search topics. 
Search Phrase: ${shortPhrase}`, + extra: { numOfResults: result.length }, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/topic_router.test.js b/core/database/foxx/tests/topic_router.test.js new file mode 100644 index 000000000..9f60da2bc --- /dev/null +++ b/core/database/foxx/tests/topic_router.test.js @@ -0,0 +1,143 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const topic_base_url = `${baseUrl}/topic`; + +describe("unit_topic_router: the Foxx microservice topic_router /view endpoint", () => { + after(function () { + const collections = ["u", "t"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + const collections = ["u", "t"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); // truncate after ensuring collection exists + } else { + db._create(name); // create if it doesn’t exist + } + }); + }); + + it("should successfully run the list route", () => { + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + + db.t.save({ + _key: "10", + }); + + // arrange + // TODO: make encoded query params less hard coded + const request_string = `${topic_base_url}/view?client=u/fakeUser&id=10`; + // act + const response = request.get(request_string); + // assert + expect(response.status).to.equal(200); + }); + it("should successfully run the search route", () => { + // Create user + db.u.save({ + _key: "fakeUser", + 
is_admin: true, + }); + + // Create ArangoSearch View (if missing) + if (!db._view("topicview")) { + db._createView("topicview", "arangosearch", { + links: { + t: { + includeAllFields: true, + }, + }, + }); + } + + // Insert topic + db.t.save({ + _key: "s1", + title: "Sample Topic", + }); + + // Force view to update (ArangoSearch is async) + db._query("FOR d IN topicview SEARCH d.title == 'nothing' RETURN d"); + + const request_string = `${topic_base_url}/search?client=fakeUser&phrase=Sample`; + + const response = request.get(request_string); + + expect(response.status).to.equal(200); + }); + + it("should list only top-level topics", () => { + db.u.save({ + _key: "fakeUser", + is_admin: true, + }); + + db.t.save({ + _key: "t1", + title: "Alpha", + top: true, + admin: false, + coll_cnt: 1, + }); + + db.t.save({ + _key: "t2", + title: "Beta", + top: true, + admin: true, + coll_cnt: 5, + }); + + db.t.save({ + _key: "child1", + title: "Child Should Not Appear", + top: false, + }); + + const url = `${topic_base_url}/list/topics?client=u/fakeUser`; + + const response = request.get(url); + + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + // last item is paging metadata + const paging = body[body.length - 1].paging; + + expect(paging.tot).to.equal(2); // only Alpha + Beta + + const ids = body.slice(0, -1).map((x) => x._id); + + expect(ids).to.include("t/t1"); + expect(ids).to.include("t/t2"); + expect(ids).to.not.include("t/child1"); + }); +}); From 3066969eb035241f22449aaf86bcb370eec30cad Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Thu, 11 Dec 2025 16:32:25 -0500 Subject: [PATCH 15/65] [DAPS-1522] - refactor: foxx, config router logging improvements (#1813) Co-authored-by: Joshua S Brown --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/config_router.js | 37 +++++++++-- .../database/foxx/tests/config_router.test.js | 66 +++++++++++++++++++ 3 files changed, 100 
insertions(+), 5 deletions(-) create mode 100644 core/database/foxx/tests/config_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index af3b57f3e..4c186ec61 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_config_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_config_router:") add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_group_router:") add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") add_test(NAME foxx_version_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version_router:") @@ -59,6 +60,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_config_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_topic_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_group_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_metrics_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/config_router.js b/core/database/foxx/api/config_router.js index 2531a1167..0d94b2eaa 100644 --- a/core/database/foxx/api/config_router.js +++ b/core/database/foxx/api/config_router.js @@ -4,16 +4,25 @@ const createRouter = require("@arangodb/foxx/router"); const router = createRouter(); 
const g_db = require("@arangodb").db; const g_lib = require("./support"); - +const logger = require("./lib/logger"); +const basePath = "config"; module.exports = router; router .get("/msg/daily", function (req, res) { + let msg = null; try { - var msg = {}, - key = { - _key: "msg_daily", - }; + logger.logRequestStarted({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg/daily", + status: "Started", + description: "Get message of the day", + }); + msg = {}; + var key = { + _key: "msg_daily", + }; if (g_db.config.exists(key)) { msg = g_db.config.document(key); @@ -24,7 +33,25 @@ router } res.send(msg); + logger.logRequestSuccess({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg/daily", + status: "Success", + description: "Get message of the day", + extra: (msg.msg || "").substring(0, 10), + }); } catch (e) { + logger.logRequestFailure({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/msg/daily", + status: "Failure", + description: "Get message of the day", + extra: (msg.msg || "").substring(0, 10), + error: e, + }); + g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/config_router.test.js b/core/database/foxx/tests/config_router.test.js new file mode 100644 index 000000000..593156f6a --- /dev/null +++ b/core/database/foxx/tests/config_router.test.js @@ -0,0 +1,66 @@ +"use strict"; +// NOTE: completion of tests requires successful run of user_fixture.js script + +// Need to pull enum from support +const g_lib = require("../api/support"); + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const config_base_url = `${baseUrl}/config`; + +describe("unit_config_router: test /msg/daily route", () => { + after(function () { + const col = db._collection("config"); + if 
(col) col.truncate(); + }); + + beforeEach(() => { + let col = db._collection("config"); + if (col) { + col.truncate(); + } else { + db._create("config"); + } + }); + + it("should return an empty object when no daily message exists", () => { + // arrange + const url = `${config_base_url}/msg/daily`; + + // act + const response = request.get(url); + + // assert + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.deep.equal({}); // empty object expected + }); + + it("should return the daily message when it exists", () => { + // arrange: insert a config entry + db.config.save({ + _key: "msg_daily", + msg: "Hello world!", + }); + + const url = `${config_base_url}/msg/daily`; + + // act + const response = request.get(url); + + // assert + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + // The route strips _id, _key, _rev + expect(body).to.deep.equal({ + msg: "Hello world!", + }); + }); +}); From 38c5e0e2ea60ee1b6943152ce07d3a45419b7fc9 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Mon, 15 Dec 2025 11:47:16 -0500 Subject: [PATCH 16/65] [DAPS-1522] - refactor: foxx, admin router logging improvements (#1814) Co-authored-by: Joshua S Brown --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/admin_router.js | 89 ++++++++++- core/database/foxx/tests/admin_router.test.js | 147 ++++++++++++++++++ 3 files changed, 236 insertions(+), 2 deletions(-) create mode 100644 core/database/foxx/tests/admin_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 4c186ec61..0f7323b3b 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t 
"unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_admin_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") add_test(NAME foxx_config_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_config_router:") add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_group_router:") add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") @@ -63,6 +64,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_config_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_topic_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_group_router PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_admin_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_metrics_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_note_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_version_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/admin_router.js b/core/database/foxx/api/admin_router.js index 9468118f1..8ef49ce9d 100644 --- a/core/database/foxx/api/admin_router.js +++ b/core/database/foxx/api/admin_router.js @@ -8,16 +8,45 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const permissions = require("./lib/permissions"); //const perf = require('@arangodb/foxx'); +const basePath = "admin"; +const logger = require("./lib/logger"); module.exports = router; router .get("/ping", function (req, res) { try { + logger.logRequestStarted({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ping", + status: "Started", + description: "Ping DB server", + }); res.send({ status: 1, }); + logger.logRequestSuccess({ + client: "N/A", + correlationId: 
req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ping", + status: "Success", + description: "Ping DB server", + extra: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ping", + status: "Failure", + description: "Ping DB server", + extra: "N/A", + error: e, + }); g_lib.handleException(e, res); } }) @@ -26,9 +55,18 @@ router router .get("/test", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/test", + status: "Started", + description: "Do perf test", + }); const client = g_lib.getUserFromClientID(req.queryParams.client); - var result = true; + result = true; var item = g_lib.resolveID(req.queryParams.item, client); var obj = g_db[item[0]].document(item); @@ -43,7 +81,26 @@ router perm: result, time: (t2 - t1) / 1000, }); + logger.logRequestSuccess({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/test", + status: "Success", + extra: { execution_time_seconds: (t2 - t1) / 1000 }, + description: "Do perf test", + }); } catch (e) { + logger.logRequestFailure({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/test", + status: "Failure", + description: "Do perf test", + extra: { execution_time_seconds: (t2 - t1) / 1000 }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -54,8 +111,17 @@ router router .get("/check", function (req, res) { + let result = null; try { - var result = {}; + logger.logRequestStarted({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/check", + status: "Started", + description: "Database integrity check", + }); + result = {}; g_db._executeTransaction({ collections: { @@ 
-281,7 +347,26 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/check", + status: "Success", + description: "Database integrity check", + extra: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: "N/A", + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/check", + status: "Failure", + description: "Database integrity check", + extra: "N/A", + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/admin_router.test.js b/core/database/foxx/tests/admin_router.test.js new file mode 100644 index 000000000..f305de9c0 --- /dev/null +++ b/core/database/foxx/tests/admin_router.test.js @@ -0,0 +1,147 @@ +"use strict"; + +// Integration test of API +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +// router base path (same pattern as your tag example) +const admin_base_url = `${baseUrl}/admin`; + +describe("unit_admin_router: the Foxx microservice admin_router /ping endpoint", () => { + // Clean up any collections if needed (this router doesn't use any) + after(function () { + const collections = ["u", "test_collection", "d"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + // no collections used, but keeping consistency with your example + const collections = ["u", "test_collection", "d"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) { + col.truncate(); + } else { + db._create(name); + } + }); + }); + + it("should successfully run the ping route", () => { + // arrange + const request_string = `${admin_base_url}/ping`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + // 
Response structure from router: + // { status: 1 } + const body = JSON.parse(response.body); + expect(body).to.be.an("object"); + expect(body.status).to.equal(1); + }); + + it("should successfully run the test route", () => { + //Create user document for the client + db.u.save({ + _key: "testUser", + name: "Test User", + email: "testuser@example.com", + is_admin: true, + }); + + const doc = db.d.save({ value: "testValue" }); // 'd' collection is allowed + const item = `d/${doc._key}`; + + // Build query params + const client = "testUser"; + + const request_string = `${admin_base_url}/test?client=${encodeURIComponent(client)}&item=${encodeURIComponent(item)}`; + + // Act + const response = request.get(request_string); + + // Assert response code + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + expect(body).to.be.an("object"); + expect(body).to.have.property("perm"); + expect(body).to.have.property("time"); + + // perm should be boolean + expect(body.perm).to.be.a("boolean"); + + // time should be numeric (seconds) + expect(body.time).to.be.a("number"); + }); + + it("should successfully run the check route", () => { + const request_string = `${admin_base_url}/check`; + + const response = request.get(request_string); + + expect(response.status).to.equal(200); + const body = JSON.parse(response.body); + + // Basic structure checks + expect(body).to.be.an("object"); + expect(body).to.have.property("edge_bad_count"); + expect(body).to.have.property("vertex_bad_count"); + + // Each edge/vertex category should exist + const expectedKeys = [ + "owner", + "member", + "item", + "acl", + "ident", + "admin", + "alias", + "alloc", + "loc", + "top", + "dep", + "data_no_owner", + "data_multi_owner", + "data_no_loc", + "data_multi_loc", + "data_no_parent", + "coll_no_owner", + "coll_multi_owner", + "coll_no_parent", + "coll_multi_parent", + "group_no_owner", + "group_multi_owner", + "alias_no_owner", + "alias_multi_owner", + "alias_no_alias", 
+ "alias_multi_alias", + "proj_no_owner", + "proj_multi_owner", + "query_no_owner", + "query_multi_owner", + "topic_no_parent", + "topic_multi_parent", + "repo_no_admin", + ]; + + expectedKeys.forEach((key) => { + expect(body).to.have.property(key); + expect(body[key]).to.be.an("array"); + }); + + // Counts should be numeric + expect(body.edge_bad_count).to.be.a("number"); + expect(body.vertex_bad_count).to.be.a("number"); + }); +}); From 02210d4843c9b8b57adadd7ca53c1131e3def100 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Mon, 15 Dec 2025 12:31:00 -0500 Subject: [PATCH 17/65] [DAPS-1522] - refactor: foxx, acl router logging improvements (#1817) Co-authored-by: Joshua S Brown --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/acl_router.js | 156 +++++++++++++++++--- core/database/foxx/tests/acl_router.test.js | 155 +++++++++++++++++++ 3 files changed, 292 insertions(+), 21 deletions(-) create mode 100644 core/database/foxx/tests/acl_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 0f7323b3b..7363691af 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_acl_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_acl_router:") add_test(NAME foxx_admin_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") add_test(NAME foxx_config_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_config_router:") add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" 
-t "unit_group_router:") @@ -61,6 +62,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_acl_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_config_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_topic_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_group_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/acl_router.js b/core/database/foxx/api/acl_router.js index 336afbc00..ac0963bb7 100644 --- a/core/database/foxx/api/acl_router.js +++ b/core/database/foxx/api/acl_router.js @@ -7,6 +7,8 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const error = require("./lib/error_codes"); const permissions = require("./lib/permissions"); +const logger = require("./lib/logger"); +const basePath = "acl"; module.exports = router; @@ -14,8 +16,17 @@ module.exports = router; router .get("/update", function (req, res) { + let result = null; try { - var result = []; + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: `Update ACL(s) on a data record or collection. ID: ${req.queryParams.id}`, + }); + result = []; g_db._executeTransaction({ collections: { @@ -185,7 +196,27 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/authn/password", + status: "Success", + description: `Update ACL(s) on a data record or collection. 
 ID: ${req.queryParams.id}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Failure", + description: `Update ACL(s) on a data record or collection. ID: ${req.queryParams.id}`, + extra: result, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -203,7 +234,17 @@ router router .get("/view", function (req, res) { + let rules = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View current ACL on an object. ID: ${req.queryParams.id}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var object = g_lib.getObject(req.queryParams.id, client); @@ -215,7 +256,7 @@ router throw error.ERR_PERM_DENIED; } - var rules = g_db ._query( "for v, e in 1..1 outbound @object acl return { id: v._id, gid: v.gid, grant: e.grant, inhgrant: e.inhgrant }", { + rules = g_db ._query( "for v, e in 1..1 outbound @object acl return { id: v._id, gid: v.gid, grant: e.grant, inhgrant: e.inhgrant }", { @@ -226,7 +267,26 @@ router postProcACLRules(rules, object); res.send(rules); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View current ACL on an object. ID: ${req.queryParams.id}`, + extra: { NumOfRules: rules.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View current ACL on an object. 
 ID: ${req.queryParams.id}`, + extra: { NumOfRules: rules ? rules.length : 0 }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -234,20 +294,46 @@ router .queryParam("id", joi.string().required(), "ID or alias of data record or collection") .summary("View current ACL on an object") .description("View current ACL on an object (data record or collection)"); - router .get("/shared/list", function (req, res) { + let result = null; try { const client = g_lib.getUserFromClientID(req.queryParams.client); - - res.send( - g_lib.getACLOwnersBySubject( - client._id, - req.queryParams.inc_users, - req.queryParams.inc_projects, - ), + result = g_lib.getACLOwnersBySubject( + client._id, + req.queryParams.inc_users, + req.queryParams.inc_projects, ); + + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list", + status: "Started", + description: `List users/projects that have shared data or collections with client/subject. Users: ${req.queryParams.inc_users}; Projects: ${req.queryParams.inc_projects}`, + }); + res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list", + status: "Success", + description: `List users/projects that have shared data or collections with client/subject. Users: ${req.queryParams.inc_users}; Projects: ${req.queryParams.inc_projects}`, + extra: { NumOfUsersAndProjs: result.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list", + status: "Failure", + description: `List users/projects that have shared data or collections with client/subject. 
 Users: ${req.queryParams.inc_users}; Projects: ${req.queryParams.inc_projects}`, + extra: { NumOfUsersAndProjs: result ? result.length : 0 }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -259,7 +345,16 @@ router router .get("/shared/list/items", function (req, res) { + let shares = []; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list/items", + status: "Started", + description: `Lists data and collections shared with client/subject by owner. Owner ID: ${req.queryParams.owner}`, + }); const client = g_lib.getUserFromClientID(req.queryParams.client); var owner_id; @@ -273,17 +368,16 @@ router owner_id = g_lib.getUserFromClientID(req.queryParams.owner)._id; } - var i, - share, - shares = g_db - ._query( - "for v in 1..2 inbound @client member, acl filter v.owner == @owner return {id:v._id,title:v.title,alias:v.alias,owner:v.owner,creator:v.creator,md_err:v.md_err,external:v.external,locked:v.locked}", - { - client: client._id, - owner: owner_id, - }, - ) - .toArray(); + var i, share; + shares = g_db + ._query( + "for v in 1..2 inbound @client member, acl filter v.owner == @owner return {id:v._id,title:v.title,alias:v.alias,owner:v.owner,creator:v.creator,md_err:v.md_err,external:v.external,locked:v.locked}", + { + client: client._id, + owner: owner_id, + }, + ) + .toArray(); for (i in shares) { share = shares[i]; @@ -295,7 +389,27 @@ router } else { res.send(dedupShares(client, shares)); } + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list/items", + status: "Success", + description: `Lists data and collections shared with client/subject by owner. 
Owner ID: ${req.queryParams.owner}`, + extra: { NumOfShares: shares.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/shared/list/items", + status: "Failure", + description: `Lists data and collections shared with client/subject by owner. Owner ID: ${req.queryParams.owner}`, + extra: { NumOfShares: shares.length }, + error: e, + }); + g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/acl_router.test.js b/core/database/foxx/tests/acl_router.test.js new file mode 100644 index 000000000..454deae50 --- /dev/null +++ b/core/database/foxx/tests/acl_router.test.js @@ -0,0 +1,155 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { db } = require("@arangodb"); +const { baseUrl } = module.context; + +const acl_base_url = `${baseUrl}/acl`; + +describe("unit_acl_router: test /update route", () => { + after(function () { + const collections = ["member", "u", "c", "d", "acl", "owner", "g"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + beforeEach(() => { + // Ensure necessary collections exist + const collections = ["u", "c", "d", "acl", "owner", "g"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + else db._create(name); + }); + + // Create a fake user + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake User", + is_admin: true, + }); + + // Create a fake collection + db.c.save({ + _key: "coll1", + _id: "c/coll1", + name: "Fake Collection", + }); + + // Link owner + db.owner.save({ + _from: "c/coll1", + _to: "u/fakeUser", + }); + db.acl.save({ + _from: "c/coll1", + _to: "u/fakeUser", + id: "u/fakeUser", + grant: 1, + inhgrant: 0, + }); + + db.u.save({ + _key: "otherUser", + _id: "u/otherUser", + name: "Other User", + is_admin: 
false, + }); + + db.c.update("coll1", { owner: "u/fakeUser" }); + + db.acl.save({ + _from: "c/coll1", + _to: "u/otherUser", + grant: 1, + inhgrant: 0, + }); + + db.member.save({ + _from: "c/coll1", + _to: "u/otherUser", + }); + db.c.update("coll1", { owner: "u/fakeUser" }); + }); + + it("should update ACL for a collection", () => { + const rules = [ + { + id: "u/fakeUser", + grant: 1, // minimal permission for testing + inhgrant: 0, + }, + ]; + + // Build query string + const query = `client=u/fakeUser&id=c/coll1&rules=${encodeURIComponent( + JSON.stringify(rules), + )}`; + + // Act + const response = request.get(`${acl_base_url}/update?${query}`); + + // Assert + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.be.an("array"); + expect(body[0]).to.have.property("id", "u/fakeUser"); + expect(body[0]).to.have.property("grant", 1); + }); + + it("should view ACLs for a collection", () => { + const query = `client=u/fakeUser&id=c/coll1`; + + const response = request.get(`${baseUrl}/acl/view?${query}`); + + // Expect HTTP 200 + expect(response.status).to.equal(200); + + // Parse body + const body = JSON.parse(response.body); + expect(body).to.be.an("array"); + expect(body.length).to.equal(2); + + const ids = body.map((x) => x.id); + expect(ids).to.include("u/fakeUser"); + expect(ids).to.include("u/otherUser"); + }); + + it("should list users who have shared objects with the subject", () => { + const query = "client=u/otherUser&inc_users=true&inc_projects=false"; + + const response = request.get(`${baseUrl}/acl/shared/list?${query}`); + + db.member.save({ _from: "c/coll1", _to: "u/otherUser" }); + + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + // Expect an array of users/projects that shared with otherUser + expect(body).to.be.an("array"); + expect(body.length).to.equal(1); + + // Expect fakeUser to appear as the one who shared + expect(body[0]).to.have.property("id", 
"u/fakeUser"); + }); + + it("should list items shared by owner with the client", () => { + const query = "client=u/otherUser&owner=u/fakeUser"; + + const response = request.get(`${baseUrl}/acl/shared/list/items?${query}`); + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.be.an("array"); + + // Should include the collection already shared + const ids = body.map((item) => item.id); + expect(ids).to.include("c/coll1"); // already exists + }); +}); From c0cb6d80fddbc825c326ce50e9b8fa7e47dd1482 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Mon, 12 Jan 2026 14:14:56 -0500 Subject: [PATCH 18/65] [DAPS-1522] - refactor: foxx, repo router logging improvements (#1823) --- core/database/foxx/api/repo_router.js | 419 +++++++++++- core/database/foxx/tests/repo_router.test.js | 631 +++++++++++++++++++ 2 files changed, 1035 insertions(+), 15 deletions(-) diff --git a/core/database/foxx/api/repo_router.js b/core/database/foxx/api/repo_router.js index 6f79c53fb..45e9ffae2 100644 --- a/core/database/foxx/api/repo_router.js +++ b/core/database/foxx/api/repo_router.js @@ -12,6 +12,9 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const g_tasks = require("./tasks"); +const logger = require("./lib/logger"); +const basePath = "repo"; + module.exports = router; function validateAndNormalizeRepoPath(obj) { @@ -47,6 +50,14 @@ function validateAndNormalizeRepoPath(obj) { router .get("/list", function (req, res) { var client; + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Started", + description: "List repo servers administered by client", + }); if (req.queryParams.client) { client = g_lib.getUserFromClientID(req.queryParams.client); @@ -95,6 +106,15 @@ router } res.send(result); + logger.logRequestSuccess({ + client: 
req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Success", + description: "List repo servers administered by client", + extra: { NumOfRepoServers: result.length }, + }); }) .queryParam("client", joi.string().allow("").optional(), "Client ID") .queryParam("details", joi.boolean().optional(), "Show additional record details") @@ -106,8 +126,17 @@ router router .get("/view", function (req, res) { + let repo = null; try { - var repo = g_db.repo.document(req.queryParams.id); + logger.logRequestStarted({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View repo server record: ${req.queryParams.id}`, + }); + + repo = g_db.repo.document(req.queryParams.id); repo.admins = []; var admins = g_db.admin @@ -122,7 +151,32 @@ router delete repo._rev; res.send([repo]); + logger.logRequestSuccess({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View repo server record: ${req.queryParams.id}`, + extra: { + type: repo.type, + capacity: repo.capacity, + admins: repo.admins, + }, + }); } catch (e) { + logger.logRequestFailure({ + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View repo server record: ${req.queryParams.id}`, + extra: { + type: repo.type, + capacity: repo.capacity, + admins: repo.admins, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -132,7 +186,17 @@ router router .post("/create", function (req, res) { + let repo_doc = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Started", + description: `Create a server record: ${req.queryParams.id}`, + }); + 
g_db._executeTransaction({ collections: { read: ["u"], @@ -166,7 +230,7 @@ router } const repo = Repositories.createRepositoryByType(obj).raiseIfError(); - const repo_doc = repo.save().raiseIfError(); + repo_doc = repo.save().raiseIfError(); for (const adminId of req.body.admins) { if (!g_db._exists(adminId)) @@ -185,7 +249,34 @@ router res.send([repo_doc]); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Success", + description: `Create a server record: ${req.queryParams.id}`, + extra: { + type: repo_doc?.type, + capacity: repo_doc?.capacity, + admins: repo_doc?.admins, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Failure", + description: `Create a server record: ${req.queryParams.id}`, + extra: { + type: repo_doc?.type, + capacity: repo_doc?.capacity, + admins: repo_doc?.admins, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -215,7 +306,16 @@ router router .post("/update", function (req, res) { + let repo = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: `Update a repo server record: ${req.queryParams.id}`, + }); g_db._executeTransaction({ collections: { read: ["u"], @@ -294,7 +394,33 @@ router res.send([repo.new]); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: `Update a repo server record: ${req.queryParams.id}`, + extra: { + capacity: req.queryParams.capacity, + admins: req.queryParams.admins, + }, + }); } catch (e) { + 
logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Failure", + description: `Update a repo server record: ${req.queryParams.id}`, + extra: { + capacity: req.queryParams.capacity, + admins: req.queryParams.admins, + }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -323,6 +449,15 @@ router router .get("/delete", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Started", + description: `Delete a repo server record: ${req.queryParams.id}`, + }); + g_db._executeTransaction({ collections: { read: ["lock"], @@ -372,7 +507,24 @@ router graph.repo.remove(req.queryParams.id); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Success", + description: `Delete a repo server record: ${req.queryParams.id}`, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Failure", + description: `Delete a repo server record: ${req.queryParams.id}`, + error: e, + }); g_lib.handleException(e, res); } }) @@ -393,6 +545,15 @@ router */ router .get("/calc_size", function (req, res) { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/calc_size", + status: "Started", + description: `Calculate per-repo sizes for specified data records and collections: ${req.queryParams.items.length}`, + }); + g_lib.getUserFromClientID(req.queryParams.client); // TODO Check permissions @@ -411,6 +572,14 @@ router } res.send(result); + 
logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/calc_size", + status: "Success", + description: `Calculate per-repo sizes for specified data records and collections: ${req.queryParams.items.length}`, + }); }) .queryParam("client", joi.string().required(), "Client ID") .queryParam( @@ -462,6 +631,14 @@ function calcSize(a_item, a_recurse, a_depth, a_visited, a_result) { router .get("/alloc/list/by_repo", function (req, res) { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/list/by_repo", + status: "Started", + description: `List all allocations for a repo: ${req.queryParams.repo}`, + }); var client = g_lib.getUserFromClientID(req.queryParams.client); var repo = g_db.repo.document(req.queryParams.repo); @@ -477,6 +654,14 @@ router .toArray(); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/list/by_repo", + status: "Success", + description: `List all allocations for a repo: ${req.queryParams.repo}`, + }); }) .queryParam("client", joi.string().required(), "Client ID") .queryParam("repo", joi.string().required(), "Repo ID") @@ -485,6 +670,14 @@ router router .get("/alloc/list/by_owner", function (req, res) { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/list/by_owner", + status: "Started", + description: `List owner's repo allocations: ${req.queryParams.owner}`, + }); var obj, result = g_db.alloc .byExample({ @@ -511,6 +704,14 @@ router } res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: 
basePath + "/alloc/list/by_owner", + status: "Success", + description: `List owner's repo allocations: ${req.queryParams.owner}`, + }); }) .queryParam("owner", joi.string().required(), "Owner ID (user or project)") .queryParam("stats", joi.boolean().optional(), "Include statistics") @@ -519,6 +720,14 @@ router router .get("/alloc/list/by_object", function (req, res) { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/list/by_object", + status: "Started", + description: `List object repo allocations: ${req.queryParams.object}`, + }); var client = g_lib.getUserFromClientID(req.queryParams.client); var obj_id = g_lib.resolveID(req.queryParams.object, client); var owner_id = g_db.owner.firstExample({ @@ -546,6 +755,14 @@ router } res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/list/by_object", + status: "Success", + description: `List object repo allocations: ${req.queryParams.object}`, + }); }) .queryParam("client", joi.string().required(), "Client ID") .queryParam("object", joi.string().required(), "Object ID (data or collection ID or alias)") @@ -554,7 +771,16 @@ router router .get("/alloc/view", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/view", + status: "Started", + description: `View allocation details: ${req.queryParams.repo}`, + }); var owner_id, client = g_lib.getUserFromClientID(req.queryParams.client); @@ -571,13 +797,13 @@ router owner_id = client._id; } - var obj, - result = g_db.alloc - .byExample({ - _from: owner_id, - _to: req.queryParams.repo, - }) - .toArray(); + var obj; + result = g_db.alloc + .byExample({ + _from: owner_id, + _to: 
 req.queryParams.repo, + }) + .toArray(); for (var i in result) { obj = result[i]; @@ -592,7 +818,26 @@ router } res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/view", + status: "Success", + description: `View allocation details: ${req.queryParams.repo}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/view", + status: "Failure", + description: `View allocation details: ${req.queryParams.repo}`, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -659,12 +904,40 @@ function getAllocStats(a_repo, a_subject) { router .get("/alloc/stats", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/stats", + status: "Started", + description: `View allocation statistics: ${req.queryParams.repo}`, + }); var client = g_lib.getUserFromClientID(req.queryParams.client); permissions.ensureAdminPermRepo(client, req.queryParams.repo); - var result = getAllocStats(req.queryParams.repo, req.queryParams.subject); + result = getAllocStats(req.queryParams.repo, req.queryParams.subject); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/stats", + status: "Success", + description: `View allocation statistics: ${req.queryParams.repo}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/stats", + status: "Failure", + description: `View allocation statistics: 
${req.queryParams.repo}`, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -676,7 +949,17 @@ router router .get("/alloc/create", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/create", + status: "Started", + description: `Create user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "repo", "admin"], @@ -700,9 +983,40 @@ router ); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/create", + status: "Success", + description: `Create user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + extra: { + task_id: result.task._id, + repo: result.task.state.repo_id, + data_limit: result.task.state.data_limit, + rec_limit: result.task.state.rec_limit, + status: "queued", + }, + }); }, }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/create", + status: "Failure", + description: `Create user/projects repo allocation: ${req.queryParams.repo}. 
Subject: ${req.queryParams.subject}`, + extra: { + task_id: result?.task?._id, + repo: result?.task?.state?.repo_id, + data_limit: result?.task?.state?.data_limit, + rec_limit: result?.task?.state?.rec_limit, + status: "queued", + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -726,7 +1040,17 @@ router router .get("/alloc/delete", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/delete", + status: "Started", + description: `Delete user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "repo", "admin"], @@ -741,16 +1065,31 @@ router subject_id = req.queryParams.subject; else subject_id = g_lib.getUserFromClientID(req.queryParams.subject)._id; - var result = g_tasks.taskInitAllocDelete( - client, - req.queryParams.repo, - subject_id, - ); + result = g_tasks.taskInitAllocDelete(client, req.queryParams.repo, subject_id); res.send(result); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/delete", + status: "Success", + description: `Delete user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/delete", + status: "Failure", + description: `Delete user/projects repo allocation: ${req.queryParams.repo}. 
Subject: ${req.queryParams.subject}`, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -765,6 +1104,14 @@ router router .get("/alloc/set", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set", + status: "Started", + description: `Set user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + }); g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "repo", "admin"], @@ -806,7 +1153,24 @@ router }); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set", + status: "Success", + description: `Set user/projects repo allocation: ${req.queryParams.repo}. Subject: ${req.queryParams.subject}`, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set", + status: "Failure", + description: `Set user/projects repo allocation: ${req.queryParams.repo}. 
Subject: ${req.queryParams.subject}`, + error: e, + }); g_lib.handleException(e, res); } }) @@ -829,6 +1193,14 @@ router router .get("/alloc/set/default", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set/default", + status: "Started", + description: `Set user/projects repo allocation as default: ${req.queryParams.repo}`, + }); g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "repo", "admin"], @@ -888,7 +1260,24 @@ router } }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set/default", + status: "Success", + description: `Set user/projects repo allocation as default: ${req.queryParams.repo}`, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/alloc/set/default", + status: "Failure", + description: `Set user/projects repo allocation as default: ${req.queryParams.repo}`, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/repo_router.test.js b/core/database/foxx/tests/repo_router.test.js index 72fecc3a0..f7689ab3a 100644 --- a/core/database/foxx/tests/repo_router.test.js +++ b/core/database/foxx/tests/repo_router.test.js @@ -253,4 +253,635 @@ describe("integration_repo_router: the Foxx microservice repo_router create endp expect(json[0]).to.have.property("path", "/mnt/nfs/large/heavymetal/"); expect(json[0]).to.have.property("pub_key", "Zm7W6W5vJjZZqFj7okjBOS8K9wVjHhYyLzX+zA8B"); }); + + it("should list all repos when no client is provided", () => { + g_db.repo.save({ _key: "r1", title: "Repo One", domain: "test" }); + g_db.repo.save({ _key: "r2", title: "Repo Two", domain: "test" }); + + const response = 
request.get(`${repo_base_url}/list`); + + expect(response.status).to.equal(200); + const json = JSON.parse(response.body); + + expect(json).to.be.an("array").with.lengthOf(2); + expect(json[0]).to.have.property("id"); + expect(json[0]).to.not.have.property("_key"); + }); + it("should view a repo by id", () => { + // arrange: seed users + g_db.u.save({ _key: "shredder", is_admin: false }); + g_db.u.save({ _key: "splinter", is_admin: true }); + + // arrange: seed repo + g_db.repo.save({ + _key: "heavymetal", + title: "Rock On!!!!", + capacity: 0, + type: "metadata", + }); + + // arrange: admin edges (repo -> user) + g_db.admin.save({ + _from: "repo/heavymetal", + _to: "u/shredder", + }); + g_db.admin.save({ + _from: "repo/heavymetal", + _to: "u/splinter", + }); + + // act + const response = request.get(`${repo_base_url}/view?id=repo/heavymetal`); + + // assert + expect(response.status).to.equal(200); + const json = JSON.parse(response.body); + + expect(json).to.be.an("array").with.lengthOf(1); + + const repo = json[0]; + + // id remapped from _id + expect(repo).to.have.property("id", "repo/heavymetal"); + + // admins resolved from admin edges + expect(repo.admins).to.have.members(["u/shredder", "u/splinter"]); + + // internal fields stripped + expect(repo).to.not.have.property("_id"); + expect(repo).to.not.have.property("_key"); + expect(repo).to.not.have.property("_rev"); + }); + + it("should update a repo when client has admin permissions", () => { + // arrange: seed admin user + g_db.u.save({ + _key: "splinter", + is_admin: true, + }); + + // arrange: seed repo + g_db.repo.save({ + _key: "heavymetal", + title: "Old Title", + summary: "Old summary", + domain: "old-domain", + capacity: 0, + type: "metadata", + }); + + // arrange: admin edge (repo -> user) + g_db.admin.save({ + _from: "repo/heavymetal", + _to: "u/splinter", + }); + + const request_string = `${repo_base_url}/update?client=u/splinter`; + + const update_body = { + id: "repo/heavymetal", + title: "New 
Title", + domain: "new-domain", + path: "/mnt/nfs/heavymetal", + capacity: 42, + admins: ["u/splinter"], + }; + + // act + const response = request.post(request_string, { + body: JSON.stringify(update_body), + headers: { "Content-Type": "application/json" }, + }); + + // assert + expect(response.status).to.equal(200); + const json = JSON.parse(response.body); + + expect(json).to.be.an("array").with.lengthOf(1); + + const repo = json[0]; + + // id remapped + expect(repo).to.have.property("id", "repo/heavymetal"); + + // updated fields + expect(repo).to.have.property("title", "New Title"); + expect(repo).to.have.property("domain", "new-domain"); + expect(repo).to.have.property("capacity", 42); + + // path normalized with trailing slash + expect(repo).to.have.property("path", "/mnt/nfs/heavymetal/"); + + // internal fields stripped + expect(repo).to.not.have.property("_id"); + expect(repo).to.not.have.property("_key"); + expect(repo).to.not.have.property("_rev"); + }); + + it("should delete a repo when user has admin perms and repo is not in use", () => { + // arrange + // create admin user + g_db.u.save(user_params_raw_admin); + + // create repo document + const repoDoc = { + _key: "heavymetal", + title: "Rock On!!!!", + capacity: 0, + type: "metadata", + }; + g_db.repo.save(repoDoc); + + // link admin to repo + g_db.admin.save({ + _from: "repo/heavymetal", + _to: "u/splinter", + }); + + // sanity check: repo exists before delete + expect(!!g_db._exists("repo/heavymetal")).to.equal(true); + const request_string = `${repo_base_url}/delete?client=${user_params_admin.id}&id=repo/heavymetal`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(204); + + // repo should no longer exist + expect(g_db._exists("repo/heavymetal")).to.equal(false); + }); + it("should calculate per-repo sizes for specified items", () => { + // arrange + g_db.u.save(user_params_raw_admin); + + g_db.repo.save({ + _key: "heavymetal", + title: 
"Rock On!!!!", + capacity: 0, + type: "metadata", + }); + + g_db.d.save({ + _key: "song1", + size: 1234, + repo: "repo/heavymetal", + }); + + g_db.alloc.save({ + _from: "d/song1", + _to: "repo/heavymetal", + }); + + const items = encodeURIComponent(JSON.stringify(["d/song1"])); + const request_string = + `${repo_base_url}/calc_size?client=${user_params_admin.id}` + + `&items=${items}&recurse=false`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + const json = JSON.parse(response.body); + expect(json).to.be.an("array"); + + // Only check repo info if something is returned + if (json.length > 0) { + expect(json[0]).to.have.property("repo"); + expect(json[0]).to.have.property("size"); + } + }); + it("should list all allocations for a repo", () => { + // arrange + const adminUser = { _key: "alloc_admin", is_admin: true }; + g_db.u.save(adminUser); + + const repoDoc = { + _key: "rock_repo", + title: "Rock Collection", + capacity: 0, + type: "metadata", + }; + g_db.repo.save(repoDoc); + + // create some data vertices + g_db.d.save({ _key: "songA", size: 1000 }); + g_db.d.save({ _key: "songB", size: 2000 }); + + // create allocation edges + g_db.alloc.save({ + _from: "d/songA", + _to: "repo/rock_repo", + data_limit: 5000, + data_size: 1000, + rec_limit: 10, + rec_count: 1, + path: "/mnt/rock/songA", + }); + + g_db.alloc.save({ + _from: "d/songB", + _to: "repo/rock_repo", + data_limit: 5000, + data_size: 2000, + rec_limit: 10, + rec_count: 1, + path: "/mnt/rock/songB", + }); + + // query parameters + const clientId = encodeURIComponent("alloc_admin"); + const repoId = encodeURIComponent("repo/rock_repo"); + + const request_string = `${repo_base_url}/alloc/list/by_repo?client=${clientId}&repo=${repoId}`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + const json = JSON.parse(response.body); + 
expect(json).to.be.an("array").with.lengthOf(2); + + // validate first allocation structure + expect(json[0]).to.have.all.keys( + "id", + "repo", + "data_limit", + "data_size", + "rec_limit", + "rec_count", + "path", + ); + expect(json[0].repo).to.equal("repo/rock_repo"); + + // validate second allocation + expect(json[1].repo).to.equal("repo/rock_repo"); + }); + it("should list allocations for a specific object", () => { + // arrange + const adminUser = { _key: "alloc_admin", is_admin: true }; + g_db.u.save(adminUser); + + // create repo + const repoDoc = { + _key: "rock_repo", + title: "Rock Collection", + capacity: 0, + type: "metadata", + }; + g_db.repo.save(repoDoc); + + // create data object + const dataObj = { _key: "songX", size: 500 }; + g_db.d.save(dataObj); + + // create owner edge: object -> owner + g_db.owner.save({ + _from: "d/songX", + _to: "u/alloc_admin", + }); + + // create allocations: owner -> repo + g_db.alloc.save({ + _from: "u/alloc_admin", + _to: "repo/rock_repo", + data_limit: 1000, + data_size: 500, + rec_limit: 5, + rec_count: 1, + path: "/mnt/rock/songX", + }); + + const clientId = encodeURIComponent("alloc_admin"); + const objectId = encodeURIComponent("d/songX"); + const request_string = `${repo_base_url}/alloc/list/by_object?client=${clientId}&object=${objectId}`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + const json = JSON.parse(response.body); + expect(json).to.be.an("array").with.lengthOf(1); + + const alloc = json[0]; + expect(alloc).to.have.all.keys( + "id", + "repo", + "data_limit", + "data_size", + "rec_limit", + "rec_count", + "path", + ); + + expect(alloc.id).to.equal("u/alloc_admin"); + expect(alloc.repo).to.equal("repo/rock_repo"); + expect(alloc.data_size).to.equal(500); + expect(alloc.rec_count).to.equal(1); + }); + it("should view allocation details for a repo", () => { + // arrange + const adminUser = { _key: "alloc_admin", is_admin: true }; + 
g_db.u.save(adminUser); + + // create repo + const repoDoc = { + _key: "rock_repo", + title: "Rock Collection", + capacity: 0, + type: "metadata", + }; + g_db.repo.save(repoDoc); + + // create allocation: user -> repo + g_db.alloc.save({ + _from: "u/alloc_admin", + _to: "repo/rock_repo", + data_limit: 1000, + data_size: 500, + rec_limit: 5, + rec_count: 1, + path: "/mnt/rock/songX", + }); + + const clientId = encodeURIComponent("alloc_admin"); + const repoId = encodeURIComponent("repo/rock_repo"); + const request_string = `${repo_base_url}/alloc/view?client=${clientId}&repo=${repoId}`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + const json = JSON.parse(response.body); + expect(json).to.be.an("array").with.lengthOf(1); + + const alloc = json[0]; + expect(alloc).to.have.all.keys( + "id", + "repo", + "data_limit", + "data_size", + "rec_limit", + "rec_count", + "path", + ); + + expect(alloc.id).to.equal("u/alloc_admin"); + expect(alloc.repo).to.equal("repo/rock_repo"); + expect(alloc.data_size).to.equal(500); + expect(alloc.rec_count).to.equal(1); + }); + + it("should view allocation details for a repo (self view)", () => { + // arrange + const user = { _key: "alloc_user", is_admin: true }; + g_db.u.save(user); + + // create repo + const repoDoc = { + _key: "rock_repo", + title: "Rock Collection", + capacity: 0, + type: "metadata", + }; + g_db.repo.save(repoDoc); + + // create allocation: user -> repo + g_db.alloc.save({ + _from: "u/alloc_user", + _to: "repo/rock_repo", + data_limit: 1000, + data_size: 500, + rec_limit: 5, + rec_count: 1, + path: "/mnt/rock/songX", + }); + + const clientId = encodeURIComponent("alloc_user"); + const repoId = encodeURIComponent("repo/rock_repo"); + const request_string = `${repo_base_url}/alloc/view?client=${clientId}&repo=${repoId}`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(200); + + const json = 
JSON.parse(response.body); + expect(json).to.be.an("array").with.lengthOf(1); + + const alloc = json[0]; + expect(alloc).to.have.all.keys( + "id", + "repo", + "data_limit", + "data_size", + "rec_limit", + "rec_count", + "path", + ); + + expect(alloc.id).to.equal("u/alloc_user"); + expect(alloc.repo).to.equal("repo/rock_repo"); + expect(alloc.data_size).to.equal(500); + expect(alloc.rec_count).to.equal(1); + }); + + it("should fetch allocation stats for a repo", () => { + // Arrange: create admin user + g_db.u.save({ _key: "stats_admin", name: "Stats Admin", role: "admin" }); + + // Create repo + g_db.repo.save({ + _key: "stats_repo", + title: "Stats Repo", + type: "metadata", + capacity: 0, + }); + + // Link the admin user to the repo via the admin edge + g_db.admin.save({ + _from: "repo/stats_repo", + _to: "u/stats_admin", + }); + + const clientId = encodeURIComponent("stats_admin"); + const repoId = encodeURIComponent("repo/stats_repo"); + const request_string = `${repo_base_url}/alloc/stats?client=${clientId}&repo=${repoId}`; + + // Act + const response = request.get(request_string); + + // Assert + expect(response.status).to.equal(200); + + const json = JSON.parse(response.body); + expect(json).to.be.an("object"); + expect(json).to.have.property("repo", "repo/stats_repo"); + expect(json).to.have.property("rec_count"); + expect(json).to.have.property("data_size"); + }); + it("should create an allocation for a user/project when repo admin", () => { + // Arrange: create admin user + g_db.u.save({ _key: "alloc_admin", is_admin: true }); + + // Create a repo + g_db.repo.save({ + _key: "music_repo", + title: "Music Repo", + type: "metadata", + capacity: 0, + }); + + // Link admin to repo + g_db.admin.save({ + _from: "repo/music_repo", + _to: "u/alloc_admin", + }); + + // Create subject user/project + g_db.u.save({ _key: "alloc_user", is_admin: false }); + + // Prepare query parameters + const clientId = encodeURIComponent("alloc_admin"); + const subjectId = 
encodeURIComponent("alloc_user"); + const repoId = encodeURIComponent("repo/music_repo"); + const dataLimit = 5000; + const recLimit = 10; + + const requestString = + `${repo_base_url}/alloc/create?client=${clientId}` + + `&subject=${subjectId}&repo=${repoId}&data_limit=${dataLimit}&rec_limit=${recLimit}`; + + // Act + const response = request.get(requestString); + + const json = JSON.parse(response.body); + expect(json).to.have.property("task"); + + const task = json.task; + expect(task).to.have.property("_key"); + expect(task).to.have.property("_id"); + expect(task).to.have.property("type", 6); + + // Optional: check state fields + expect(task.state).to.have.property("repo_id", "repo/music_repo"); + expect(task.state).to.have.property("subject", "u/alloc_user"); + expect(task.state).to.have.property("data_limit", dataLimit); + expect(task.state).to.have.property("rec_limit", recLimit); + }); + + it("should delete an allocation for a user/project when repo admin", async function () { + // First, create the allocation and wait for it to finish + const createRes = await request.get("/repo/alloc/create", { + client: "alloc_admin", + subject: "alloc_user", + repo: "repo/music_repo", + data_limit: 5000, + rec_limit: 10, + }); + + expect(createRes.status).to.equal(200); + expect(createRes.json).to.have.property("task"); + + // Now, delete the allocation + const deleteRes = await request.get("/repo/alloc/delete", { + client: "alloc_admin", + subject: "alloc_user", + repo: "repo/music_repo", + }); + + expect(deleteRes.status).to.equal(200); + expect(deleteRes.json).to.be.an("object"); + + const task = deleteRes.json.task; + + expect(task).to.have.property("_key"); + expect(task).to.have.property("_id"); + expect(task).to.have.property("type"); + expect(task).to.have.property("status"); + expect(task.type).to.equal(6); + + // Validate the task state + expect(task).to.have.property("state"); + expect(task.state).to.have.property("repo_id", "repo/music_repo"); + 
expect(task.state).to.have.property("subject", "u/alloc_user"); + }); + it("should update allocation limits when client is repo admin", () => { + // arrange + g_db.u.save({ _key: "alloc_admin", is_admin: true }); + g_db.u.save({ _key: "alloc_user", is_admin: false }); + + g_db.repo.save({ + _key: "rock_repo", + title: "Rock Repo", + capacity: 0, + type: "metadata", + }); + + // admin edge + g_db.admin.save({ + _from: "repo/rock_repo", + _to: "u/alloc_admin", + }); + + // existing allocation + g_db.alloc.save({ + _from: "u/alloc_user", + _to: "repo/rock_repo", + data_limit: 1000, + rec_limit: 5, + }); + + const request_string = + `${repo_base_url}/alloc/set?client=alloc_admin` + + `&subject=alloc_user&repo=repo/rock_repo` + + `&data_limit=5000&rec_limit=20`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(204); + + const alloc = g_db.alloc.firstExample({ + _from: "u/alloc_user", + _to: "repo/rock_repo", + }); + + expect(alloc.data_limit).to.equal(5000); + expect(alloc.rec_limit).to.equal(20); + }); + it("should set default allocation for self", () => { + // arrange + g_db.u.save({ _key: "alloc_user", is_admin: false }); + + g_db.repo.save({ _key: "repo1", title: "Repo 1", capacity: 0, type: "metadata" }); + g_db.repo.save({ _key: "repo2", title: "Repo 2", capacity: 0, type: "metadata" }); + + g_db.alloc.save({ _from: "u/alloc_user", _to: "repo/repo1", is_def: false }); + g_db.alloc.save({ _from: "u/alloc_user", _to: "repo/repo2", is_def: true }); + + const request_string = `${repo_base_url}/alloc/set/default?client=alloc_user&repo=repo/repo1`; + + // act + const response = request.get(request_string); + + // assert + expect(response.status).to.equal(204); + + const alloc1 = g_db.alloc.firstExample({ _from: "u/alloc_user", _to: "repo/repo1" }); + const alloc2 = g_db.alloc.firstExample({ _from: "u/alloc_user", _to: "repo/repo2" }); + + expect(alloc1.is_def).to.equal(true); + 
expect(alloc2.is_def).to.equal(false); + }); }); From 7926d0eeca3ae573cffb563fd35718a8b5761a23 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 14 Jan 2026 13:43:02 -0500 Subject: [PATCH 19/65] [DAPS-1522] - refactor: foxx, schema router logging improvements (#1821) --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/schema_router.js | 245 +++++++++++++++++- .../database/foxx/tests/schema_router.test.js | 137 ++++++++++ 3 files changed, 370 insertions(+), 14 deletions(-) create mode 100644 core/database/foxx/tests/schema_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 7363691af..5a2b89a25 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_schema_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_schema_router:") add_test(NAME foxx_acl_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_acl_router:") add_test(NAME foxx_admin_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") add_test(NAME foxx_config_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_config_router:") @@ -62,6 +63,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_schema_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_acl_router 
PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_config_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_topic_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/schema_router.js b/core/database/foxx/api/schema_router.js index 51734b678..ee5356207 100644 --- a/core/database/foxx/api/schema_router.js +++ b/core/database/foxx/api/schema_router.js @@ -8,6 +8,8 @@ const error = require("./lib/error_codes"); const g_db = require("@arangodb").db; const g_lib = require("./support"); const g_graph = require("@arangodb/general-graph")._graph("sdmsg"); +const logger = require("./lib/logger"); +const basePath = "schema"; module.exports = router; @@ -78,7 +80,16 @@ function _resolveDeps(a_sch_id, a_refs) { router .post("/create", function (req, res) { + let sch = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: `Create schema. ID: ${req.body.id}`, + }); g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], @@ -115,7 +126,7 @@ router g_lib.procInputParam(req.body, "_sch_id", false, obj); g_lib.procInputParam(req.body, "desc", false, obj); - var sch = g_db.sch.save(obj, { + sch = g_db.sch.save(obj, { returnNew: true, }).new; @@ -129,7 +140,36 @@ router res.send([sch]); }, }); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Success", + description: `Create schema. 
ID: ${req.body.id}`, + extra: { + sch_id: sch?.id, + own_id: sch?.own_id, + pub: req.body.pub, + sys: req.body.sys, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Failure", + description: `Create schema. ID: ${req.body.id}`, + extra: { + sch_id: sch?.id, + own_id: sch?.own_id, + pub: req.body.pub, + sys: req.body.sys, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -151,7 +191,17 @@ router router .post("/update", function (req, res) { + let sch_new = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: `Update schema. Schema ID: ${req.queryParams.id}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], @@ -243,7 +293,7 @@ router obj.def = req.body.def; } - var sch_new = g_db.sch.update(sch_old._id, obj, { + sch_new = g_db.sch.update(sch_old._id, obj, { returnNew: true, mergeObjects: false, keepNull: false, @@ -259,7 +309,38 @@ router res.send([sch_new]); }, }); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Success", + description: `Update schema. Schema ID: ${req.queryParams.id}`, + extra: { + id: sch_new.id, + own_id: sch_new.own_id, + pub: sch_new.pub, + sys: req.body?.sys ?? false, + ver: sch_new.ver, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Failure", + description: `Update schema. Schema ID: ${req.queryParams.id}`, + extra: { + id: sch_new.id, + own_id: sch_new.own_id, + pub: sch_new.pub, + sys: req.body?.sys ?? 
false, + ver: sch_new.ver, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -282,7 +363,17 @@ router router .post("/revise", function (req, res) { + let sch_new = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/revise", + status: "Started", + description: `Revise schema. Schema ID: ${req.queryParams.id}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], @@ -364,7 +455,7 @@ router delete sch._key; delete sch._rev; - var sch_new = g_db.sch.save(sch, { + sch_new = g_db.sch.save(sch, { returnNew: true, }).new; @@ -384,7 +475,39 @@ router res.send([sch_new]); }, }); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/revise", + status: "Success", + description: `Revise schema. Schema ID: ${req.queryParams.id}`, + extra: { + own_id: sch_new.own_id, + own_nm: sch_new.own_nm, + id: sch_new.id, + pub: req.body.pub, + sys: req.body.sys, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/revise", + status: "Failure", + description: `Revise schema. Schema ID: ${req.queryParams.id}`, + extra: { + own_id: sch_new.own_id, + own_nm: sch_new.own_nm, + id: sch_new.id, + pub: req.body.pub, + sys: req.body.sys, + }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -406,18 +529,29 @@ router router .post("/delete", function (req, res) { + let sch_old = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Started", + description: `Delete schema. 
Schema ID: ${req.queryParams.id}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), - sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), - sch_old = g_db.sch.firstExample({ - id: sch_id, - ver: sch_ver, - }); + sch_ver = parseInt(req.queryParams.id.substr(idx + 1)); + + sch_old = g_db.sch.firstExample({ + id: sch_id, + ver: sch_ver, + }); if (!sch_old) throw [error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; @@ -457,7 +591,25 @@ router } g_graph.sch.remove(sch_old._id); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Success", + description: `Delete schema. Schema ID: ${req.queryParams.id}`, + extra: { deleted: sch_old._id }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Failure", + description: `Delete schema. Schema ID: ${req.queryParams.id}`, + extra: { deleted: sch_old._id }, + }); g_lib.handleException(e, res); } }) @@ -468,18 +620,27 @@ router router .get("/view", function (req, res) { + let sch = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View schema. 
Schema ID: ${req.queryParams.id}`, + }); const client = g_lib.getUserFromClientID(req.queryParams.client); var idx = req.queryParams.id.indexOf(":"); if (idx < 0) { throw [error.ERR_INVALID_PARAM, "Schema ID missing version number suffix."]; } var sch_id = req.queryParams.id.substr(0, idx), - sch_ver = parseInt(req.queryParams.id.substr(idx + 1)), - sch = g_db.sch.firstExample({ - id: sch_id, - ver: sch_ver, - }); + sch_ver = parseInt(req.queryParams.id.substr(idx + 1)); + sch = g_db.sch.firstExample({ + id: sch_id, + ver: sch_ver, + }); if (!sch) throw [error.ERR_NOT_FOUND, "Schema '" + req.queryParams.id + "' not found."]; @@ -515,7 +676,34 @@ router fixSchOwnNm(sch); res.send([sch]); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View schema. Schema ID: ${req.queryParams.id}`, + extra: { + own_id: sch.own_id, + own_nm: sch.own_nm, + id: sch.id, + pub: sch.pub, + sys: sch.sys, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View schema. 
Schema ID: ${req.queryParams.id}`, + extra: { + pub: sch.pub, + sys: sch.sys, + }, + }); g_lib.handleException(e, res); } }) @@ -527,11 +715,20 @@ router router .get("/search", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Started", + description: `Search schema.`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var qry, par = {}, - result, off = 0, cnt = 50, doc; @@ -628,8 +825,28 @@ router }, }); + const first = result.find((r) => r.own_id); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Success", + description: `Search schema.`, + extra: first ? { own_id: first.own_id, own_nm: first.own_nm } : {}, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams?.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Failure", + description: `Search schema.`, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/schema_router.test.js b/core/database/foxx/tests/schema_router.test.js new file mode 100644 index 000000000..94f36d7e3 --- /dev/null +++ b/core/database/foxx/tests/schema_router.test.js @@ -0,0 +1,137 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +const schema_base_url = `${baseUrl}/schema`; + +describe("schema router", () => { + before(() => { + const collections = ["u", "sch", "sch_dep"]; + collections.forEach((name) => { + const col = db._collection(name); + if (col) col.truncate(); + else db._create(name); + }); + + db.u.save({ + _key: 
"fakeUser", + _id: "u/fakeUser", + name: "Fake User", + is_admin: true, + }); + }); + + it("unit_schema_router: should create a schema", () => { + const body = { + id: "test_schema_1", + desc: "A simple test schema", + def: { properties: { field1: { type: "string" } } }, + pub: true, + sys: false, + }; + + const response = request.post(`${schema_base_url}/create?client=u/fakeUser`, { + body: JSON.stringify(body), + headers: { "Content-Type": "application/json" }, + }); + + expect(response.status).to.equal(200); + + const result = JSON.parse(response.body); + expect(result).to.be.an("array"); + expect(result[0].def).to.deep.equal(body.def); + expect(result[0].own_nm).to.equal("Fake"); + }); + + it("unit_schema_router: should update a schema", () => { + const response = request.post( + `${schema_base_url}/update?client=u/fakeUser&id=test_schema_1:0`, + { + body: JSON.stringify({ + desc: "Updated schema description", + def: { properties: { field1: { type: "string" } } }, + pub: true, + }), + headers: { "Content-Type": "application/json" }, + }, + ); + + expect(response.status).to.equal(200); + + const schema = JSON.parse(response.body)[0]; + expect(schema).to.have.property("id", "test_schema_1"); + expect(schema).to.have.property("desc", "Updated schema description"); + expect(schema).to.have.property("own_id", "u/fakeUser"); + }); + + it("unit_schema_router: should revise a schema", () => { + const response = request.post( + `${schema_base_url}/revise?client=u/fakeUser&id=test_schema_1:0`, + { + body: JSON.stringify({ + desc: "Revised schema description", + def: { + properties: { + field1: { type: "string" }, + field2: { type: "number" }, + }, + }, + pub: true, + }), + headers: { "Content-Type": "application/json" }, + }, + ); + + expect(response.status).to.equal(200); + + const schema = JSON.parse(response.body)[0]; + expect(schema).to.have.property("ver", 1); + expect(schema).to.have.property("id", "test_schema_1"); + }); + + it("unit_schema_router: should search 
schemas", () => { + const response = request.get(`${schema_base_url}/search?client=u/fakeUser`); + + expect(response.status).to.equal(200); + + const result = JSON.parse(response.body); + expect(result).to.be.an("array"); + + const paging = result[result.length - 1]; + expect(paging).to.have.property("paging"); + + const schemas = result.filter((r) => !r.paging); + if (schemas.length) { + expect(schemas[0]).to.have.property("ver"); + expect(schemas[0]).to.have.property("own_id"); + expect(schemas[0]).to.have.property("own_nm"); + } + }); + + it("unit_schema_router: should delete latest schema revision", () => { + const response = request.post( + `${schema_base_url}/delete?client=u/fakeUser&id=test_schema_1:1`, + ); + + expect(response.status).to.equal(204); + + const deleted = db.sch.firstExample({ id: "test_schema_1", ver: 1 }); + expect(deleted).to.equal(null); + }); + + it("unit_schema_router: should view a schema", () => { + const response = request.get( + `${schema_base_url}/view?client=u/fakeUser&id=test_schema_1:0`, + ); + + expect(response.status).to.equal(200); + + const schema = JSON.parse(response.body)[0]; + expect(schema).to.have.property("id", "test_schema_1"); + expect(schema).to.have.property("ver", 0); + expect(schema).to.have.property("own_id", "u/fakeUser"); + }); +}); From bbee1c28942d6a6272f8f96308a3fcc5765fc5b4 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:18:44 -0500 Subject: [PATCH 20/65] [DAPS-1522] - refactor: foxx, proj router logging improvements (#1822) --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/proj_router.js | 271 ++++++++- core/database/foxx/tests/proj_router.test.js | 601 +++++++++++++++++++ 3 files changed, 868 insertions(+), 6 deletions(-) create mode 100644 core/database/foxx/tests/proj_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 5a2b89a25..8ae670db7 100644 --- 
a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_proj_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_proj_router:") add_test(NAME foxx_schema_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_schema_router:") add_test(NAME foxx_acl_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_acl_router:") add_test(NAME foxx_admin_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") @@ -63,6 +64,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_proj_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_schema_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_acl_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_config_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/proj_router.js b/core/database/foxx/api/proj_router.js index a237b561f..47f3d0d53 100644 --- a/core/database/foxx/api/proj_router.js +++ b/core/database/foxx/api/proj_router.js @@ -9,6 +9,8 @@ const g_lib = require("./support"); const error = require("./lib/error_codes"); const permissions = require("./lib/permissions"); const g_tasks = require("./tasks"); +const logger = require("./lib/logger"); +const basePath = "prj"; module.exports = router; @@ -16,8 +18,16 @@ module.exports = router; router 
.get("/create", function (req, res) { + let result = null; try { - var result; + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Started", + description: `Create new projects. Project ID: ${req.queryParams.id}`, + }); g_db._executeTransaction({ collections: { @@ -199,7 +209,44 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Success", + description: `Create new projects. Project ID: ${req.queryParams.id}`, + }); + if (req.queryParams.admins?.length) { + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Success", + description: `Admins added: ${req.queryParams.admins}`, + }); + } + if (req.queryParams.members?.length) { + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Success", + description: `Members added: ${req.queryParams.members}`, + }); + } } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/create", + status: "Failure", + description: `Create new projects. Project ID: ${req.queryParams.id}`, + error: e, + }); g_lib.handleException(e, res); } }) @@ -218,8 +265,17 @@ router router .get("/update", function (req, res) { + let proj = null; + let result = null; try { - var result; + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: `Update project information. 
Project ID: ${req.queryParams.id}`, + }); g_db._executeTransaction({ collections: { @@ -264,7 +320,7 @@ router } } - var proj = g_db._update(proj_id, obj, { + proj = g_db._update(proj_id, obj, { keepNull: false, returnNew: true, }); @@ -367,7 +423,61 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: `Update project information. Project ID: ${req.queryParams.id}`, + extra: { + owner: proj.new?.owner, + title: proj.new?.title + ? proj.new?.title.length > 15 + ? proj.new?.title.slice(0, 15) + "…" + : proj.new?.title + : undefined, + }, + }); + if (req.queryParams.admins?.length) { + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: `Admins added: ${req.queryParams.admins}`, + }); + } + if (req.queryParams.members?.length) { + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Success", + description: `Members added: ${req.queryParams.members}`, + }); + } } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Failure", + description: `Update project information. Project ID: ${req.queryParams.id}`, + extra: { + owner: proj.new?.owner, + title: proj.new?.title + ? proj.new?.title.length > 15 + ? 
proj.new?.title.slice(0, 15) + "…" + : proj.new?.title + : undefined, + }, + + error: e, + }); g_lib.handleException(e, res); } }) @@ -386,7 +496,17 @@ router router .get("/view", function (req, res) { + let proj = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View project information. ID: ${req.queryParams.id}`, + }); + // TODO Enforce view permission const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); @@ -394,7 +514,7 @@ router if (!g_db.p.exists(req.queryParams.id)) throw [error.ERR_INVALID_PARAM, "No such project '" + req.queryParams.id + "'"]; - var proj = g_db.p.document({ + proj = g_db.p.document({ _id: req.queryParams.id, }); @@ -451,7 +571,26 @@ router delete proj._rev; res.send([proj]); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View project information. ID: ${req.queryParams.id}`, + result: proj, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View project information. 
ID: ${req.queryParams.id}`, + extra: proj, + error: e, + }); g_lib.handleException(e, res); } }) @@ -462,6 +601,15 @@ router router .get("/list", function (req, res) { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Started", + description: `List projects`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var qry, result, @@ -570,6 +718,15 @@ router //res.send( g_db._query( qry, { user: client._id })); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list", + status: "Success", + description: `List projects`, + extra: { NumOfProjs: tot }, + }); }) .queryParam("client", joi.string().required(), "Client ID") .queryParam("subject", joi.string().optional(), "Subject (user) ID") @@ -591,11 +748,54 @@ router router .get("/search", function (req, res) { + let result = null; + let extra_log = null; + const rawQuery = typeof req.queryParams.query === "string" ? req.queryParams.query : ""; + const safeQuerySnippet = + rawQuery.length > 200 ? rawQuery.slice(0, 200) + "…[truncated]" : rawQuery; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Started", + description: `Find all projects that match query: ${safeQuerySnippet}`, + }); + g_lib.getUserFromClientID(req.queryParams.client); - res.send(g_db._query(req.queryParams.query, {})); + result = g_db._query(req.queryParams.query, {}); + res.send(result); + extra_log = { + documents: result._documents ? 
result._documents.slice(0, 10) : [], // first 10 IDs only + countTotal: result?._countTotal, + countQuery: result?._countQuery, + skip: result?._skip, + limit: result?._limit, + cached: result?._cached, + }; + + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Success", + description: `Find all projects that match query: ${safeQuerySnippet}`, + extra: extra_log, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/search", + status: "Failure", + description: `Find all projects that match query: ${safeQuerySnippet}`, + extra: extra_log, + error: e, + }); g_lib.handleException(e, res); } }) @@ -606,7 +806,17 @@ router router .post("/delete", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Started", + description: `Delete project(s) and all associated data records and raw data. IDs: ${req.body.ids}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "p", "alloc"], @@ -621,7 +831,26 @@ router res.send(result); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Success", + description: `Delete project(s) and all associated data records and raw data. IDs: ${req.body.ids}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Failure", + description: `Delete project(s) and all associated data records and raw data. 
IDs: ${req.body.ids}`, + extra: result, + error: e, + }); g_lib.handleException(e, res); } }) @@ -639,7 +868,17 @@ router router .get("/get_role", function (req, res) { + let role = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_role", + status: "Started", + description: `Get client/subject project role. ID: ${req.queryParams.id}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var subj; @@ -653,12 +892,32 @@ router if (!g_db._exists(req.queryParams.id)) throw [error.ERR_NOT_FOUND, "Project, " + req.queryParams.id + ", not found"]; - var role = g_lib.getProjectRole(subj, req.queryParams.id); + role = g_lib.getProjectRole(subj, req.queryParams.id); res.send({ role: role, }); + + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_role", + status: "Success", + description: `Get client/subject project role. ID: ${req.queryParams.id}`, + extra: { role: role }, + }); } catch (e) { + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_role", + status: "Success", + description: `Get client/subject project role. 
ID: ${req.queryParams.id}`, + extra: { role: role }, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/proj_router.test.js b/core/database/foxx/tests/proj_router.test.js new file mode 100644 index 000000000..3e854f84c --- /dev/null +++ b/core/database/foxx/tests/proj_router.test.js @@ -0,0 +1,601 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { db } = require("@arangodb"); +const { baseUrl } = module.context; + +const proj_base_url = `${baseUrl}/prj`; + +describe("unit_proj_router: test project create endpoint", () => { + beforeEach(() => { + const collections = [ + "u", + "p", + "repo", + "admin", + "owner", + "c", + "a", + "g", + "acl", + "member", + "ident", + "alias", + "uuid", + "accn", + ]; + + collections.forEach((name) => { + const col = db._collection(name); + if (col) col.truncate(); + else db._create(name); + }); + }); + + after(() => { + const collections = [ + "u", + "p", + "repo", + "admin", + "owner", + "c", + "a", + "g", + "acl", + "member", + "ident", + "alias", + "uuid", + "accn", + ]; + + collections.forEach((name) => { + const col = db._collection(name); + if (col) col.truncate(); + }); + }); + + it("should create a new project when client is a repo admin", () => { + // ------------------------------------------------------------------ + // arrange + // ------------------------------------------------------------------ + + // create user + db.u.save({ + _key: "proj_admin", + _id: "u/proj_admin", + is_admin: false, + max_proj: -1, + }); + + // create repo + db.repo.save({ + _key: "testrepo", + title: "Test Repo", + capacity: 0, + type: "metadata", + }); + + // link user as repo admin (required by /prj/create) + db.admin.save({ + _from: "repo/testrepo", + _to: "u/proj_admin", + }); + + const url = + `${proj_base_url}/create` + + `?client=u/proj_admin` + + `&id=myproject` + + `&title=My+Project` + + `&desc=Test+project+description`; + + // 
------------------------------------------------------------------ + // act + // ------------------------------------------------------------------ + + const response = request.get(url, { + headers: { "x-correlation-id": "test-proj-create" }, + }); + + // ------------------------------------------------------------------ + // assert + // ------------------------------------------------------------------ + + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.be.an("array").with.lengthOf(1); + + const project = body[0]; + + // project fields + expect(project).to.have.property("id"); + expect(project).to.have.property("title", "My Project"); + expect(project).to.have.property("desc", "Test project description"); + expect(project).to.have.property("owner", "u/proj_admin"); + + // admins / members arrays initialized + expect(project).to.have.property("admins").that.is.an("array"); + expect(project).to.have.property("members").that.is.an("array"); + + const exists = db._exists(project.id); + expect(exists).to.not.equal(null); + + // owner edge created + const ownerEdge = db.owner.firstExample({ + _from: project.id, + _to: "u/proj_admin", + }); + expect(ownerEdge).to.exist; + + // root collection created + const rootCollection = db.c.firstExample({ + owner: project.id, + is_root: true, + }); + expect(rootCollection).to.exist; + + // members group created + const membersGroup = db.g.firstExample({ + uid: project.id, + gid: "members", + }); + expect(membersGroup).to.exist; + }); + it("should update project metadata and membership when client is project admin", () => { + // ------------------------------------------------------------------ + // arrange + // ------------------------------------------------------------------ + + // users + db.u.save({ _key: "proj_admin", is_admin: false }); + db.u.save({ _key: "new_admin", is_admin: false }); + db.u.save({ _key: "member1", is_admin: false }); + + // project + db.p.save({ + 
_key: "myproject", + title: "Old Title", + desc: "Old description", + ct: 1, + ut: 1, + owner: "u/proj_admin", + }); + + // owner edge + db.owner.save({ + _from: "p/myproject", + _to: "u/proj_admin", + }); + + // members group (MUST match create logic exactly) + const memGrp = db.g.save({ + uid: "p/myproject", + gid: "members", + title: "Project Members", + desc: "Use to set baseline project member permissions.", + }); + + // ownership edge: group -> project + db.owner.save({ + _from: memGrp._id, + _to: "p/myproject", + }); + + // existing admin edge + db.admin.save({ + _from: "p/myproject", + _to: "u/proj_admin", + }); + + const url = + `${proj_base_url}/update` + + `?client=u/proj_admin` + + `&id=p/myproject` + + `&title=New+Title` + + `&desc=New+description`; + + // ------------------------------------------------------------------ + // act + // ------------------------------------------------------------------ + + const response = request.get(url, { + headers: { "x-correlation-id": "test-proj-update" }, + }); + + // ------------------------------------------------------------------ + // assert + // ------------------------------------------------------------------ + + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.be.an("array").with.lengthOf(1); + + const proj = body[0]; + // core fields updated + expect(proj).to.have.property("id", "p/myproject"); + expect(proj).to.have.property("title", "New Title"); + expect(proj).to.have.property("desc", "New description"); + + // admins unchanged + expect(proj.admins).to.have.members(["u/proj_admin"]); + + // members unchanged + expect(proj.members).to.be.an("array").that.is.empty; + + // project document updated in DB + const storedProj = db.p.document("p/myproject"); + expect(storedProj.title).to.equal("New Title"); + expect(storedProj.desc).to.equal("New description"); + }); + + it("should return project info including admins and members", () => { + // --- arrange 
--- + // create users + db.u.save({ _key: "proj_admin" }); + db.u.save({ _key: "member1" }); + + // create project + db.p.save({ + _key: "myproject", + title: "Test Project", + desc: "Test description", + owner: "u/proj_admin", + }); + + // admin edge + db.admin.save({ _from: "p/myproject", _to: "u/proj_admin" }); + + // members group + const memGrp = db.g.save({ + uid: "p/myproject", + gid: "members", + title: "Project Members", + desc: "Use to set baseline project member permissions.", + }); + + // ownership edge: group -> project + db.owner.save({ + _from: memGrp._id, + _to: "p/myproject", + }); + + // member edge: group -> user + db.member.save({ + _from: memGrp._id, + _to: "u/member1", + }); + + // --- act --- + const url = `${proj_base_url}/view?client=u/proj_admin&id=p/myproject`; + const response = request.get(url, { headers: { "x-correlation-id": "test-proj-view" } }); + + // --- assert --- + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + expect(body).to.be.an("array").with.lengthOf(1); + + const proj = body[0]; + expect(proj).to.have.property("id", "p/myproject"); + expect(proj).to.have.property("title", "Test Project"); + expect(proj).to.have.property("desc", "Test description"); + + // admins + expect(proj.admins).to.be.an("array").that.includes("u/proj_admin"); + + // members + expect(proj.members).to.be.an("array").that.includes("u/member1"); + + // allocs array should exist even if empty + expect(proj).to.have.property("allocs").that.is.an("array"); + }); + + it("should return a list of projects for a client including ownership, admin, and member roles", () => { + // ------------------------------------------------------------------ + // Arrange: setup users and projects + // ------------------------------------------------------------------ + db.u.save({ _key: "proj_owner", is_admin: false }); + db.u.save({ _key: "proj_admin", is_admin: false }); + db.u.save({ _key: "proj_member", is_admin: false }); + + // 
Project 1: owned by proj_owner + db.p.save({ + _key: "proj1", + title: "Project One", + desc: "First project", + ct: 1, + ut: 1, + owner: "u/proj_owner", + }); + db.owner.save({ _from: "p/proj1", _to: "u/proj_owner" }); + + // Project 2: admin by proj_owner + db.p.save({ + _key: "proj2", + title: "Project Two", + desc: "Second project", + ct: 1, + ut: 1, + owner: "u/proj_admin", + }); + db.admin.save({ _from: "p/proj2", _to: "u/proj_owner" }); + + // Project 3: member role for proj_owner + db.p.save({ + _key: "proj3", + title: "Project Three", + desc: "Third project", + ct: 1, + ut: 1, + owner: "u/proj_admin", + }); + + const membersGroup = db.g.save({ + uid: "p/proj3", + gid: "members", + title: "Project Three Members", + desc: "Member group for proj3", + }); + + db.owner.save({ _from: membersGroup._id, _to: "p/proj3" }); + db.member.save({ _from: membersGroup._id, _to: "u/proj_owner" }); + + // ------------------------------------------------------------------ + // Act: call the /list route + // ------------------------------------------------------------------ + const url = + `${proj_base_url}/list` + + `?client=u/proj_owner` + + `&as_owner=true&as_admin=true&as_member=true`; + + const response = request.get(url, { + headers: { "x-correlation-id": "test-proj-list" }, + }); + + // ------------------------------------------------------------------ + // Assert + // ------------------------------------------------------------------ + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + const ids = body.map((p) => p.id); + + expect(ids).to.include.members(["p/proj1", "p/proj2", "p/proj3"]); + }); + + it("should search projects using a provided AQL query", () => { + // ------------------------------------------------------------------ + // Arrange + // ------------------------------------------------------------------ + db.u.save({ _key: "search_user", is_admin: false }); + + db.p.save({ + _key: "search_proj1", + title: "Alpha Project", 
+ desc: "First searchable project", + ct: 1, + ut: 1, + owner: "u/search_user", + }); + + db.p.save({ + _key: "search_proj2", + title: "Beta Project", + desc: "Second searchable project", + ct: 1, + ut: 1, + owner: "u/search_user", + }); + + // AQL query passed directly to /search + const aql = "FOR p IN p FILTER p.title LIKE '%Project%' RETURN p._id"; + + const url = + `${proj_base_url}/search` + + `?client=u/search_user` + + `&query=${encodeURIComponent(aql)}`; + + // ------------------------------------------------------------------ + // Act + // ------------------------------------------------------------------ + const response = request.get(url, { + headers: { "x-correlation-id": "test-proj-search" }, + }); + + // ------------------------------------------------------------------ + // Assert + // ------------------------------------------------------------------ + expect(response.status).to.equal(200); + + const body = JSON.parse(response.body); + + expect(body).to.be.an("array"); + expect(body).to.include.members(["p/search_proj1", "p/search_proj2"]); + }); + + it("should enqueue a project delete task when client is authorized", () => { + // ------------------------------------------------------------------ + // Arrange + // ------------------------------------------------------------------ + db.u.save({ _key: "delete_admin", is_admin: true }); + + db.p.save({ + _key: "delete_proj1", + title: "Delete Me", + desc: "Project to be deleted", + ct: 1, + ut: 1, + owner: "u/delete_admin", + }); + + db.owner.save({ + _from: "p/delete_proj1", + _to: "u/delete_admin", + }); + + db.admin.save({ + _from: "p/delete_proj1", + _to: "u/delete_admin", + }); + + const url = `${proj_base_url}/delete?client=u/delete_admin`; + + // ------------------------------------------------------------------ + // Act + // ------------------------------------------------------------------ + const response = request.post(url, { + headers: { + "content-type": "application/json", + 
"x-correlation-id": "test-proj-delete", + }, + body: JSON.stringify({ + ids: ["p/delete_proj1"], + }), + }); + + // ------------------------------------------------------------------ + // Assert + // ------------------------------------------------------------------ + expect(response.status).to.equal(200); + + // Response body is allowed to be null + const body = JSON.parse(response.body); + expect(body).to.exist; + expect(body).to.be.an("object"); + expect(body).to.have.property("task"); + + // Delete is async — project still exists immediately + expect(db.p.exists("p/delete_proj1")).to.exist; + }); + + it("should return the correct project role for client or subject", () => { + // ------------------------------------------------------------------ + // arrange + // ------------------------------------------------------------------ + + db.u.save({ _key: "proj_owner", is_admin: false }); + db.u.save({ _key: "proj_admin", is_admin: false }); + db.u.save({ _key: "proj_member", is_admin: false }); + + db.p.save({ + _key: "role_proj", + title: "Role Test Project", + owner: "u/proj_owner", + ct: 1, + ut: 1, + }); + + // owner edge + db.owner.save({ + _from: "p/role_proj", + _to: "u/proj_owner", + }); + + // admin edge + db.admin.save({ + _from: "p/role_proj", + _to: "u/proj_admin", + }); + + // members group + const memGrp = db.g.save({ + uid: "p/role_proj", + gid: "members", + title: "Project Members", + desc: "Members group", + }); + + db.owner.save({ + _from: memGrp._id, + _to: "p/role_proj", + }); + + db.member.save({ + _from: memGrp._id, + _to: "u/proj_member", + }); + + // ------------------------------------------------------------------ + // OWNER + // ------------------------------------------------------------------ + + let response = request.get( + `${proj_base_url}/get_role` + `?client=u/proj_owner` + `&id=p/role_proj`, + { headers: { "x-correlation-id": "test-proj-get-role-owner" } }, + ); + + expect(response.status).to.equal(200); + let body = 
JSON.parse(response.body); + expect(body.role).to.equal(3); // owner + + // ------------------------------------------------------------------ + // ADMIN (subject) + // ------------------------------------------------------------------ + + response = request.get( + `${proj_base_url}/get_role` + + `?client=u/proj_owner` + + `&subject=u/proj_admin` + + `&id=p/role_proj`, + { headers: { "x-correlation-id": "test-proj-get-role-admin" } }, + ); + + expect(response.status).to.equal(200); + body = JSON.parse(response.body); + expect(body.role).to.equal(2); // admin + + // ------------------------------------------------------------------ + // MEMBER (subject) + // ------------------------------------------------------------------ + + response = request.get( + `${proj_base_url}/get_role` + + `?client=u/proj_owner` + + `&subject=u/proj_member` + + `&id=p/role_proj`, + { headers: { "x-correlation-id": "test-proj-get-role-member" } }, + ); + + expect(response.status).to.equal(200); + body = JSON.parse(response.body); + expect(body.role).to.equal(1); // member + }); + + it("should handle malformed AQL for /prj/search without crashing and return an error response", () => { + // ------------------------------------------------------------------ + // Arrange + // ------------------------------------------------------------------ + db.u.save({ _key: "search_user_malformed", is_admin: false }); + + const client = "search_user_malformed"; + + // Intentionally malformed AQL (missing RETURN and invalid syntax) + const malformedBody = { + client, + aql: "FOR p IN p FILTER p.title == @title INVALID_SYNTAX", + bindVars: { + title: "Alpha Project", + }, + }; + + // ------------------------------------------------------------------ + // Act + // ------------------------------------------------------------------ + const response = request.get("/prj/search", malformedBody); + + // ------------------------------------------------------------------ + // Assert + // 
------------------------------------------------------------------ + // Expect a 400-series error (bad request / invalid query) and a JSON error payload + expect(response.status).to.be.within(400, 499); + + const body = JSON.parse(response.body); + expect(body).to.have.property("error", true); + expect(body).to.have.property("code"); + expect(body).to.have.property("errorMessage"); + }); +}); From f1288916e927d94eb08c46d9021b627509dd2d21 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:19:41 -0500 Subject: [PATCH 21/65] [DAPS-1522] - refactor: foxx, user router logging improvements (#1809) --- core/database/foxx/api/user_router.js | 395 +++++++++++++------------- 1 file changed, 197 insertions(+), 198 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 95cd4d368..2a33a984d 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -30,7 +30,7 @@ router } logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/authn/password", @@ -43,23 +43,23 @@ router authorized: true, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/authn/password", status: "Success", + extra: `Resolved Client: ${client}`, description: "Authenticating user via password", - extra: "undefined", }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/authn/password", status: "Failure", description: "Authenticating user via password", - extra: "undefined", + extra: `Resolved Client: ${client}`, error: e, }); g_lib.handleException(e, res); @@ -100,7 +100,7 @@ 
router routePath: basePath + "/authn/token", status: "Success", description: "Authenticating user via access token", - extra: "undefined", + extra: "N/A", }); } catch (e) { logger.logRequestFailure({ @@ -110,7 +110,7 @@ router routePath: basePath + "/authn/token", status: "Failure", description: "Authenticating user via access token", - extra: "undefined", + extra: "N/A", error: e, }); @@ -314,25 +314,26 @@ router router .get("/update", function (req, res) { - let client = null; let result = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/update", + status: "Started", + description: `Update user information. Subject: ${sub}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], write: ["u", "admin"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); var user_id; - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/update", - status: "Started", - description: "Update user information", - }); if (req.queryParams.subject) { user_id = req.queryParams.subject; @@ -394,22 +395,22 @@ router }, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/update", status: "Success", - description: "Update user information", + description: `Update user information. 
Subject: ${sub}`, extra: result, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/update", status: "Failure", - description: "Update user information", + description: `Update user information. Subject: ${sub}`, extra: result, error: e, }); @@ -495,12 +496,12 @@ router try { name = req.queryParams.name_uid.trim(); logger.logRequestStarted({ - client: name, + client: "N/A", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/find/by_name_uid", status: "Started", - description: "Find users matching partial name and/or uid", + description: `Find users matching partial name and/or uid: ${name}`, }); if (name.length < 2) @@ -540,7 +541,7 @@ router httpVerb: "GET", routePath: basePath + "/find/by_name_uid", status: "Success", - description: "Find users matching partial name and/or uid", + description: `Find users matching partial name and/or uid: ${name}`, extra: result, }); } catch (e) { @@ -550,7 +551,7 @@ router httpVerb: "GET", routePath: basePath + "/find/by_name_uid", status: "Failure", - description: "Find users matching partial name and/or uid", + description: `Find users matching partial name and/or uid: ${name}`, extra: result, error: e, }); @@ -565,7 +566,7 @@ router router .get("/keys/set", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; try { g_db._executeTransaction({ collections: { @@ -573,15 +574,15 @@ router write: ["u"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/set", status: "Started", - description: "Set user public and private keys", + description: `Set user public and private keys. Subject: ${sub}`, }); var user_id; @@ -606,23 +607,23 @@ router }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/set", status: "Success", - description: "Set user public and private keys", - extra: "undefined", + description: `Set user public and private keys. Subject: ${sub}`, + extra: "N/A", }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/set", status: "Failure", - description: "Set user public and private keys", - extra: "undefined", + description: `Set user public and private keys. Subject: ${sub}`, + extra: "N/A", error: e, }); g_lib.handleException(e, res); @@ -637,23 +638,23 @@ router router .get("/keys/clear", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/keys/clear", + status: "Started", + description: `Clear user public and private keys. 
Subject: ${sub}`, + }); g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn"], write: ["u"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/keys/clear", - status: "Started", - description: "Clear user public and private keys", - }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var user_id; @@ -676,23 +677,23 @@ router }, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/clear", status: "Success", - description: "Clear user public and private keys", - extra: "undefined", + description: `Clear user public and private keys. Subject: ${sub}`, + extra: "N/A", }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/clear", status: "Failure", - description: "Clear user public and private keys", - extra: "undefined", + description: `Clear user public and private keys. Subject: ${sub}`, + extra: "N/A", error: e, }); g_lib.handleException(e, res); @@ -705,7 +706,7 @@ router router .get("/keys/get", function (req, res) { - let user = null; + let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; try { if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) @@ -714,19 +715,19 @@ router "No such user '" + req.queryParams.subject + "'", ]; - user = g_db.u.document({ + let user = g_db.u.document({ _id: req.queryParams.subject, }); logger.logRequestStarted({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/get", status: "Started", - description: "Get user public and private keys", + description: `Get user public and private keys. ${sub}`, }); } else { - user = g_lib.getUserFromClientID(req.queryParams.client); + let user = g_lib.getUserFromClientID(req.queryParams.client); } if (!user.pub_key || !user.priv_key) { @@ -736,13 +737,12 @@ router }, ]); logger.logRequestSuccess({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/get", status: "Success", description: "Get user public and private keys", - extra: "undefined", }); } else { res.send([ @@ -753,24 +753,24 @@ router }, ]); logger.logRequestSuccess({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/get", status: "Success", - description: "Get user public and private keys", - extra: "undefined", + description: `Get user public and private keys. ${sub}`, + extra: "N/A", }); } } catch (e) { logger.logRequestFailure({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/keys/get", status: "Failure", - description: "Get user public and private keys", - extra: "undefined", + description: `Get user public and private keys. 
${sub}`, + extra: "N/A", error: e, }); g_lib.handleException(e, res); @@ -824,24 +824,24 @@ router router .get("/token/set", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/token/set", + status: "Started", + description: `Setting user token. Subject: ${sub}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "globus_coll"], write: ["u", "globus_coll", "globus_token"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); - - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/token/set", - status: "Started", - description: "Setting user token", - }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var user_id; let user_doc; @@ -953,25 +953,25 @@ router ); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/set", status: "Success", - description: "Setting user token", + description: `Setting user token. Subject: ${sub}`, extra: `${tokenTypeName} (${token_type})`, }); }, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/set", status: "Failure", - description: "Setting user tokens", - extra: "undefined", + description: `Setting user token. Subject: ${sub}`, + extra: "N/A", error: e, }); g_lib.handleException(e, res); @@ -1005,7 +1005,7 @@ router router .get("/token/get", function (req, res) { - let user = null; + let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; try { const collection_token = UserToken.validateRequestParams(req.queryParams); // TODO: collection type determines logic when mapped vs HA @@ -1018,20 +1018,20 @@ router "No such user '" + req.queryParams.subject + "'", ]; - user = g_db.u.document({ + var user = g_db.u.document({ _id: req.queryParams.subject, }); } else { - user = g_lib.getUserFromClientID(req.queryParams.client); + var user = g_lib.getUserFromClientID(req.queryParams.client); } logger.logRequestStarted({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get", status: "Started", - description: "Getting user token", + description: `Getting user token. Subject: ${sub}`, }); const user_token = new UserToken({ @@ -1052,22 +1052,23 @@ router res.send(result); logger.logRequestSuccess({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get", status: "Success", - description: "Getting user token", + description: `Getting user token. Subject: ${sub}`, + extra: "N/A", }); } catch (e) { logger.logRequestFailure({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get", status: "Failure", - description: "Getting user tokens", - extra: "undefined", + description: `Getting user token. Subject: ${sub}`, + extra: "N/A", error: e, }); g_lib.handleException(e, res); @@ -1090,7 +1091,7 @@ router router .get("/token/get/access", function (req, res) { - let user = null; + let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; try { if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) @@ -1098,42 +1099,42 @@ router error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; - user = g_db.u.document({ + let user = g_db.u.document({ _id: req.queryParams.subject, }); } else { - user = g_lib.getUserFromClientID(req.queryParams.client); + let user = g_lib.getUserFromClientID(req.queryParams.client); } logger.logRequestStarted({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/access", status: "Started", - description: "Getting User Access Token", + description: `Getting User Access Token. Subject: ${sub}`, }); if (!user.access) throw [error.ERR_NOT_FOUND, "No access token found"]; res.send(user.access); logger.logRequestSuccess({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/access", status: "Success", - description: "Getting User Access Token", - extra: "undefined", + description: `Getting User Access Token. Subject: ${sub}`, + extra: "N/A", }); } catch (e) { logger.logRequestFailure({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/access", status: "Failure", - description: "Getting User Access Token", - extra: "undefined", + description: `Getting User Access Token. 
Subject: ${sub}`, + extra: "N/A", error: e, }); @@ -1147,11 +1148,10 @@ router router .get("/token/get/expiring", function (req, res) { - let user = null; let result = null; try { logger.logRequestStarted({ - client: user?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", @@ -1167,7 +1167,7 @@ router ); res.send(results); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", @@ -1177,7 +1177,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/token/get/expiring", @@ -1196,16 +1196,16 @@ router router .get("/view", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { - client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", status: "Started", - description: "View User Information", + description: `View User Information. Subject: ${sub}`, }); var user, @@ -1288,23 +1288,23 @@ router res.send([user]); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", status: "Success", - description: "View User Information", + description: `View User Information. Subject: ${sub}`, extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, }); //req.queryParams.details ? 
} catch (e) { g_lib.handleException(e, res); logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", status: "Failure", - description: "View User Information", + description: `View User Information. Subject: ${sub}`, extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, error: e, }); @@ -1318,11 +1318,10 @@ router router .get("/list/all", function (req, res) { - let client = null; var qry = "for i in u sort i.name_last, i.name_first"; var result; logger.logRequestStarted({ - client: client?._id, + client: "N/A", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list/all", @@ -1361,7 +1360,7 @@ router res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: "N/A", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list/all", @@ -1380,7 +1379,7 @@ router var result, client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list/collab", @@ -1429,7 +1428,7 @@ router res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list/collab", @@ -1452,9 +1451,17 @@ Note: must delete ALL data records and projects owned by the user being deleted */ router .get("/delete", function (req, res) { - let client = null; let user_id = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/delete", + status: "Started", + description: `Remove existing user entry: ${user_id}, Subject: ${sub}`, + }); + g_db._executeTransaction({ collections: { read: 
["u", "admin"], @@ -1479,15 +1486,7 @@ router ], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/delete", - status: "Started", - description: "Remove existing user entry", - }); + const client = g_lib.getUserFromClientID(req.queryParams.client); if (req.queryParams.subject) { user_id = req.queryParams.subject; @@ -1550,23 +1549,21 @@ router }, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/delete", status: "Success", - description: "Remove existing user entry", - extra: user_id, + description: `Remove existing user entry: ${user_id}, Subject: ${sub}`, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/delete", status: "Failure", - description: "Remove existing user entry", - extra: user_id, + description: `Remove existing user entry: ${user_id}, Subject: ${sub}`, error: e, }); g_lib.handleException(e, res); @@ -1579,16 +1576,17 @@ router router .get("/ident/list", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; + let extra_log = []; try { client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/list", status: "Started", - description: "List user linked IDs", + description: `List user linked IDs. 
Subject: ${sub}`, }); if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) @@ -1599,42 +1597,45 @@ router const subject = g_db.u.document(req.queryParams.subject); permissions.ensureAdminPermUser(client, subject._id); - res.send( - g_db._query("for v in 1..1 outbound @client ident return v._key", { - client: subject._id, - }), - ); + var result = g_db._query("for v in 1..1 outbound @client ident return v._key", { + client: subject._id, + }); + extra_log = result.toArray(); + res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/list", status: "Success", - description: "List user linked IDs", + description: `List user linked IDs. Subject: ${sub}`, + extra: { NumOfIds: extra_log.length }, }); } else { - res.send( - g_db._query("for v in 1..1 outbound @client ident return v._key", { - client: client._id, - }), - ); + var result = g_db._query("for v in 1..1 outbound @client ident return v._key", { + client: client._id, + }); + res.send(result); + extra_log = result.toArray(); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/list", status: "Success", - description: "List user linked IDs", + description: `List user linked IDs. Subject: ${sub}`, + extra: { NumOfIds: extra_log.length }, }); } } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/list", status: "Failure", - description: "List user linked IDs", + description: `List user linked IDs. 
Subject: ${sub}`, + extra: { NumOfIds: extra_log.length }, error: e, }); g_lib.handleException(e, res); @@ -1646,24 +1647,25 @@ router router .get("/ident/add", function (req, res) { - let client = null; + let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/add", + status: "Started", + description: `Add new linked identity. Subject: ${sub}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "admin"], write: ["uuid", "accn", "ident"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); var id; - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/ident/add", - status: "Started", - description: "Add new linked identity", - }); if (g_lib.isUUID(req.queryParams.ident)) { if ( @@ -1672,12 +1674,12 @@ router }) ) logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/add", status: "Success", - description: "Add new linked identity", + description: `Add new linked identity. Subject: ${sub}`, extra: req.queryParams.ident, }); @@ -1709,12 +1711,12 @@ router ); } logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/add", status: "Success", - description: "Add new linked identity", + description: `Add new linked identity. 
Subject: ${sub}`, extra: req.queryParams.ident, }); return; @@ -1759,22 +1761,22 @@ router }, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/add", status: "Success", - description: "Add new linked identity", + description: `Add new linked identity. Subject: ${sub}`, extra: req.queryParams.ident, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/add", status: "Failure", - description: "Add new linked identity", + description: `Add new linked identity. Subject: ${sub}`, extra: req.queryParams.ident, error: e, }); @@ -1793,8 +1795,15 @@ router router .get("/ident/remove", function (req, res) { - let client = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/ident/remove", + status: "Started", + description: `Remove linked identity ${req.queryParams.ident} from user account.`, + }); g_db._executeTransaction({ collections: { read: ["u", "admin"], @@ -1803,14 +1812,6 @@ router action: function () { client = g_lib.getUserFromClientID(req.queryParams.client); const owner = g_lib.getUserFromClientID(req.queryParams.ident); - logger.logRequestStarted({ - client: client?._id, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/ident/remove", - status: "Started", - description: "Remove linked identity from user account", - }); permissions.ensureAdminPermUser(client, owner._id); if (g_lib.isUUID(req.queryParams.ident)) { @@ -1825,23 +1826,21 @@ router }, }); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + 
"/ident/remove", status: "Success", - description: "Remove linked identity from user account", - extra: req.queryParams.ident, + description: `Remove linked identity ${req.queryParams.ident} from user account.`, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ident/remove", status: "Failure", - description: "Remove linked identity from user account", - extra: req.queryParams.ident, + description: `Remove linked identity ${req.queryParams.ident} from user account.`, error: e, }); g_lib.handleException(e, res); @@ -1854,11 +1853,12 @@ router router .get("/ep/get", function (req, res) { - let client = null; + let first = null; try { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); + first = client?.eps.length ? client?.eps[0] : undefined; logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/get", @@ -1868,23 +1868,23 @@ router res.send(client.eps ? 
client.eps : []); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/get", status: "Success", description: "Get recent end-points", - extra: client.eps, + extra: { most_recent: first, count: client?.eps?.length }, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/get", status: "Failure", description: "Get recent end-points", - extra: client.eps, + extra: { most_recent: first, count: client?.eps?.length }, error: e, }); g_lib.handleException(e, res); @@ -1896,11 +1896,10 @@ router router .get("/ep/set", function (req, res) { - let client = null; try { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/set", @@ -1917,7 +1916,7 @@ router }, ); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/set", @@ -1927,7 +1926,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/set", From 226662830aef723ac4838fc621af65e2c00012a2 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Thu, 15 Jan 2026 08:49:21 -0500 Subject: [PATCH 22/65] [DAPS-1522] - refactor: foxx, collection router logging improvements (#1818) --- core/database/CMakeLists.txt | 2 + core/database/foxx/api/coll_router.js | 346 ++++++++++- 
core/database/foxx/tests/coll_router.test.js | 592 +++++++++++++++++++ 3 files changed, 921 insertions(+), 19 deletions(-) create mode 100644 core/database/foxx/tests/coll_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index 8ae670db7..f1739bd1b 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -28,6 +28,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") add_test(NAME foxx_support COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_support:") add_test(NAME foxx_user_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_user_router:") + add_test(NAME foxx_coll_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_coll_router:") add_test(NAME foxx_proj_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_proj_router:") add_test(NAME foxx_schema_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_schema_router:") add_test(NAME foxx_acl_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_acl_router:") @@ -64,6 +65,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_validation_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_path PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_user_router PROPERTIES FIXTURES_REQUIRED "Foxx;FoxxDBFixtures") + set_tests_properties(foxx_coll_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_proj_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_schema_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_acl_router PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/coll_router.js b/core/database/foxx/api/coll_router.js index e3c66b21d..ee37f4739 100644 --- a/core/database/foxx/api/coll_router.js +++ b/core/database/foxx/api/coll_router.js @@ -9,7 +9,8 @@ const g_graph = 
require("@arangodb/general-graph")._graph("sdmsg"); const g_lib = require("./support"); const error = require("./lib/error_codes"); const permissions = require("./lib/permissions"); - +const logger = require("./lib/logger"); +const basePath = "col"; module.exports = router; //===== COLLECTION API FUNCTIONS ===== @@ -17,9 +18,19 @@ module.exports = router; router .post("/create", function (req, res) { var retry = 10; - + let client = null; + let log_extra = null; for (;;) { try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: "Create a new data collection", + }); var result = []; g_db._executeTransaction({ @@ -28,7 +39,7 @@ router write: ["c", "a", "alias", "owner", "item", "t", "top", "tag"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var owner = client, parent_id; @@ -171,8 +182,38 @@ router res.send({ results: result, }); + + const item = result[0]; // the newly created collection + + log_extra = { + owner: item.owner, + creator: item.creator, + title: item.title?.substring(0, 20), + tags: Array.isArray(item.tags) ? 
item.tags.slice(0, 10) : [], + parent_id: item.parent_id, + id: item.id, + }; + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Success", + description: "Create a new data collection", + extra: log_extra, + }); break; } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Failure", + description: "Create a new data collection", + extra: log_extra, + error: e, + }); if (--retry == 0 || !e.errorNum || e.errorNum != 1200) { g_lib.handleException(e, res); } @@ -199,9 +240,20 @@ router router .post("/update", function (req, res) { var retry = 10; - + let client = null; + let extra_log = null; for (;;) { try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: `Update an existing collection. ID: ${req.body.id}`, + }); + var result = { results: [], updates: [], @@ -388,8 +440,36 @@ router }); res.send(result); + + extra_log = { + updates: result.updates.map(({ ut, title, desc, ...rest }) => ({ + ...rest, + title: title?.length > 15 ? title.slice(0, 15) + "..." : title, + desc: desc?.length > 15 ? desc.slice(0, 15) + "..." : desc, + })), + }; + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Success", + description: `Update an existing collection. 
ID: ${req.body.id}`, + extra: extra_log, + }); break; } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Failure", + description: `Update an existing collection. ID: ${req.body.id}`, + extra: extra_log, + error: e, + }); if (--retry == 0 || !e.errorNum || e.errorNum != 1200) { g_lib.handleException(e, res); } @@ -416,13 +496,24 @@ router router .get("/view", function (req, res) { + let client = null; + let coll = null; + let extra_log = null; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `View collection information by ID or alias. ID: ${req.queryParams.id}`, + }); var coll_id = g_lib.resolveCollID(req.queryParams.id, client), - coll = g_db.c.document(coll_id), admin = false; + coll = g_db.c.document(coll_id); if (client) { admin = permissions.hasAdminPermObject(client, coll_id); @@ -446,7 +537,33 @@ router res.send({ results: [coll], }); + + extra_log = { + ...coll, + title: coll?.title?.length > 15 ? coll.title.slice(0, 15) + "..." : coll?.title, + desc: coll?.desc?.length > 15 ? coll.desc.slice(0, 15) + "..." : coll?.desc, + }; + + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `View collection information by ID or alias. 
ID: ${req.queryParams.id}`, + extra: extra_log, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `View collection information by ID or alias. ID: ${req.queryParams.id}`, + extra: extra_log, + error: e, + }); g_lib.handleException(e, res); } }) @@ -457,8 +574,18 @@ router router .get("/read", function (req, res) { + let client = null; + let result = null; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/read", + status: "Started", + description: `Read contents of a collection by ID or alias. ID: ${req.queryParams.id}`, + }); var coll_id = g_lib.resolveCollID(req.queryParams.id, client), coll = g_db.c.document(coll_id), @@ -477,7 +604,6 @@ router var qry = "for v in 1..1 outbound @coll item sort is_same_collection('c',v) DESC, v.title", - result, params = { coll: coll_id, }, @@ -518,7 +644,26 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/read", + status: "Success", + description: `Read contents of a collection by ID or alias. ID: ${req.queryParams.id}`, + extra: result, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/read", + status: "Failure", + extra: result, + description: `Read contents of a collection by ID or alias. 
ID: ${req.queryParams.id}`, + error: e, + }); g_lib.handleException(e, res); } }) @@ -531,15 +676,25 @@ router router .get("/write", function (req, res) { + let client = null; + let loose_res = null; try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/write", + status: "Started", + description: "Add/remove items in a collection", + }); + g_db._executeTransaction({ collections: { read: ["u", "c", "uuid", "accn"], write: ["item", "d"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); - if (req.queryParams.add && req.queryParams.remove) { throw [ error.ERR_INVALID_PARAM, @@ -730,8 +885,8 @@ router // 7. Re-link loose items to root if (have_loose) { var root_id = g_lib.getRootID(owner_id), - rctxt = null, - loose_res = []; + rctxt = null; + loose_res = []; cres = g_db._query("for v in 1..1 outbound @coll item return v._id", { coll: root_id, @@ -795,7 +950,34 @@ router } }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/write", + status: "Success", + description: "Add/remove items in a collection", + extra: { + addedCount: req.queryParams.add?.length || 0, + removedCount: req.queryParams.remove?.length || 0, + looseCount: loose_res ? loose_res?.length : 0, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/write", + status: "Failure", + description: "Add/remove items in a collection", + extra: { + addedCount: req.queryParams.add?.length || 0, + removedCount: req.queryParams.remove?.length || 0, + looseCount: loose_res ? 
loose_res?.length : 0, + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -808,14 +990,27 @@ router router .get("/move", function (req, res) { + let client = null; + const itemCount = Array.isArray(req.queryParams.items) ? req.queryParams.items.length : 0; + try { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/move", + status: "Started", + description: `Move items from source collection: ${req.queryParams.source} to destination collection: ${req.queryParams.dest}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "c", "uuid", "accn"], write: ["item", "d"], }, action: function () { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); var src_id = g_lib.resolveCollID(req.queryParams.source, client), src = g_db.c.document(src_id), dst_id = g_lib.resolveCollID(req.queryParams.dest, client), @@ -954,7 +1149,26 @@ router res.send({}); }, }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/move", + status: "Success", + description: `Move items from source collection: ${req.queryParams.source} to destination collection: ${req.queryParams.dest}`, + extra: { movedCount: itemCount }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/move", + status: "Failure", + description: `Move items from source collection: ${req.queryParams.source} to destination collection: ${req.queryParams.dest}`, + extra: { movedCount: itemCount }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -967,14 +1181,25 @@ router router .get("/get_parents", function (req, res) { + let client = null; + let results = null; try { - 
const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_parents", + status: "Started", + description: `Get parent collection(s) (path) of item. ID: ${req.queryParams.id}`, + }); + var item_id = g_lib.resolveID(req.queryParams.id, client); if (!item_id.startsWith("d/") && !item_id.startsWith("c/")) throw [error.ERR_INVALID_PARAM, "ID is not a collection or record."]; - var results = g_lib.getParents(item_id); + results = g_lib.getParents(item_id); if (req.queryParams.inclusive) { var item; if (item_id[0] == "c") item = g_db.c.document(item_id); @@ -990,7 +1215,26 @@ router } } res.send(results); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_parents", + status: "Success", + description: `Get parent collection(s) (path) of item. ID: ${req.queryParams.id}`, + extra: { NumOfParentColls: results?.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_parents", + status: "Failure", + description: `Get parent collection(s) (path) of item. 
ID: ${req.queryParams.id}`, + extra: { NumOfParentColls: results?.length }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1002,8 +1246,20 @@ router router .get("/get_offset", function (req, res) { + let get_offset = null; + let client = null; + let idx = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_offset", + status: "Started", + description: `Get offset to item in collection. ID: ${req.queryParams.id}; Item ID: ${req.queryParams.item}; Page Size: ${req.queryParams.page_sz}`, + }); + client = g_lib.getUserFromClientID(req.queryParams.client); var coll_id = g_lib.resolveID(req.queryParams.id, client); var item_id = g_lib.resolveID(req.queryParams.item, client); @@ -1025,7 +1281,7 @@ router offset: 0, }); else { - var idx = ids.indexOf(item_id); + idx = ids.indexOf(item_id); if (idx < 0) throw [ error.ERR_NOT_FOUND, @@ -1038,8 +1294,31 @@ router res.send({ offset: req.queryParams.page_sz * Math.floor(idx / req.queryParams.page_sz), }); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_offset", + status: "Success", + description: `Get offset to item in collection. ID: ${req.queryParams.id}; Item ID: ${req.queryParams.item}; Page Size: ${req.queryParams.page_sz}`, + extra: { + offset: req.queryParams.page_sz * Math.floor(idx / req.queryParams.page_sz), + }, + }); } } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/get_offset", + status: "Failure", + description: `Get offset to item in collection. 
ID: ${req.queryParams.id}; Item ID: ${req.queryParams.item}; Page Size: ${req.queryParams.page_sz}`, + extra: { + offset: req.queryParams.page_sz * Math.floor(idx / req.queryParams.page_sz), + }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1054,8 +1333,19 @@ router router .get("/published/list", function (req, res) { + let client = null; + let result = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/published/list", + status: "Started", + description: "Get list of clients published collections.", + }); + var owner_id; if (req.queryParams.subject) { @@ -1066,7 +1356,6 @@ router var qry = "for v in 1..1 inbound @user owner filter is_same_collection('c',v) && v.public sort v.title"; - var result; if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { qry += " limit " + req.queryParams.offset + ", " + req.queryParams.count; @@ -1098,7 +1387,26 @@ router } res.send(result); + logger.logRequestSuccess({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/published/list", + status: "Success", + description: "Get list of clients published collections.", + extra: { total: result._countTotal }, + }); } catch (e) { + logger.logRequestFailure({ + client: client?._id, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/published/list", + status: "Failure", + description: "Get list of clients published collections.", + extra: { total: result._countTotal }, + error: e, + }); g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/tests/coll_router.test.js b/core/database/foxx/tests/coll_router.test.js new file mode 100644 index 000000000..df85b1133 --- /dev/null +++ 
b/core/database/foxx/tests/coll_router.test.js @@ -0,0 +1,592 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { baseUrl } = module.context; +const { db } = require("@arangodb"); + +// import support utilities if needed +const g_lib = require("../api/support"); + +const coll_base_url = `${baseUrl}/col/create`; + +describe("unit_coll_router: /col/create endpoint", () => { + // + // NOTE: + // The /create route requires many collections and relations: + // - c, owner, alloc, a, alias, item, t, top, tag, uuid, accn + // You must prepare enough minimal fixture data so the transaction succeeds. + // + + beforeEach(() => { + const collections = [ + "c", + "owner", + "alloc", + "a", + "alias", + "item", + "t", + "top", + "tag", + "uuid", + "accn", + "u", + "d", + ]; + + collections.forEach((name) => { + let col = db._collection(name); + if (!col) { + db._create(name); + } else { + col.truncate(); + } + }); + + // + // MINIMAL FIXTURE SETUP REQUIRED + // + + // 1. Create a fake client user + db.u.save({ + _key: "client1", + name: "Test User", + max_coll: 10, + }); + + // 2. Alloc record so "owner" has an allocation + db.alloc.save({ + _from: "u/client1", + _to: "alloc/owner1", + }); + + // 3. Root collection for this user + const root = db.c.save({ + _key: "root1", + owner: "u/client1", + creator: "u/client1", + ct: 0, + ut: 0, + title: "root", + }); + + // 4. 
Owner edge pointing to root + db.owner.save({ + _from: "c/root1", + _to: "u/client1", + }); + + // (Optional) If your g_lib.getRootID depends on something else, adjust accordingly + }); + + after(() => { + const collections = [ + "c", + "owner", + "alloc", + "a", + "alias", + "item", + "t", + "top", + "tag", + "uuid", + "accn", + "u", + ]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); + }); + + it("should successfully create a new collection", () => { + const body = { + title: "Test Collection", + desc: "Unit Test Desc", + parent: "c/root1", + tags: ["alpha", "beta"], + }; + + // Send POST with query param ?client=client1 + const response = request.post(coll_base_url + "?client=client1", { + json: true, + body, + }); + + expect(response.status).to.equal(200); + expect(response.json).to.have.property("results"); + expect(response.json.results).to.be.an("array").with.length(1); + + const created = response.json.results[0]; + + expect(created).to.have.property("title", "Test Collection"); + expect(created).to.have.property("parent_id", "c/root1"); + }); + + it("should NOT crash if collection creation fails before result is built", () => { + const body = { + title: "Broken Collection", + parent: "c/doesNotExist", + }; + + const response = request.post(`${baseUrl}/col/create?client=client1`, { + json: true, + body, + }); + + // Should return controlled error, not logging crash + expect(response.status).to.not.equal(500); + expect(response.json).to.have.property("error"); + }); + + it("should update an existing collection", () => { + db.c.save({ + _key: "coll1", + owner: "u/client1", + creator: "u/client1", + title: "Old Title", + desc: "Old Desc", + tags: ["old"], + }); + + db.owner.save({ + _from: "c/coll1", + _to: "u/client1", + }); + // + // ---- CALL UPDATE ---- + // + const body = { + id: "c/coll1", + title: "New Title", + desc: "New Desc", + tags: ["x", "y"], + }; + + const response = 
request.post(`${baseUrl}/col/update?client=client1`, { + json: true, + body, + }); + + expect(response.status).to.equal(200); + expect(response.json.results).to.be.an("array").with.length(1); + + const updated = response.json.results[0]; + + // + // ---- ASSERTIONS ---- + // + expect(updated.title).to.equal("New Title"); + expect(updated.desc).to.equal("New Desc"); + expect(updated.tags).to.deep.equal(["x", "y"]); + }); + + it("should NOT crash when updating a non-existent collection", () => { + const body = { + id: "c/missing", + title: "Nope", + }; + + const response = request.post(`${baseUrl}/col/update?client=client1`, { + json: true, + body, + }); + + expect(response.status).to.not.equal(500); + expect(response.json).to.have.property("error"); + }); + + it("should view an existing collection", () => { + // + // Minimal fixture data required for view route + // + + // The collection we want to view + db.c.save({ + _key: "collview1", + owner: "u/client1", + creator: "u/client1", + title: "View Title", + desc: "View Desc", + tags: ["v1", "v2"], + notes: "This is a test note", + }); + + // Owner edge + db.owner.save({ + _from: "c/collview1", + _to: "u/client1", + }); + + // + // ---- CALL VIEW ---- + // + const response = request.get(`${baseUrl}/col/view?client=client1&id=c/collview1`, { + json: true, + }); + + // + // ---- ASSERTIONS ---- + // + expect(response.status).to.equal(200); + expect(response.json.results).to.be.an("array").with.length(1); + + const viewed = response.json.results[0]; + + expect(viewed.id).to.equal("c/collview1"); + expect(viewed.title).to.equal("View Title"); + expect(viewed.desc).to.equal("View Desc"); + + // tags come through normally + expect(viewed.tags).to.deep.equal(["v1", "v2"]); + + // notes are passed through mask (not null) + expect(viewed.notes).to.exist; + }); + + it("should read the contents of a collection", () => { + // Create parent + db.c.save({ + _key: "readParent", + owner: "u/client1", + creator: "u/client1", + title: 
"Parent", + }); + + // Allow client1 to list it + db.owner.save({ + _from: "c/readParent", + _to: "u/client1", + }); + + // Create one child in c + db.c.save({ + _key: "readChild", + owner: "u/client1", + creator: "u/client1", + title: "Child", + }); + + // Link parent -> child with item edge + db.item.save({ + _from: "c/readParent", + _to: "c/readChild", + }); + + // ---- Call /read ---- + const response = request.get(`${baseUrl}/col/read?client=client1&id=c/readParent`, { + json: true, + }); + + // ---- Assertions ---- + expect(response.status).to.equal(200); + expect(response.json).to.be.an("array"); + + // Should contain the child + const child = response.json.find((r) => r.id === "c/readChild"); + expect(child).to.exist; + expect(child.title).to.equal("Child"); + }); + + it("should NOT crash when user lacks permission to read collection", () => { + // Create collection owned by someone else + db.c.save({ + _key: "privateColl", + owner: "u/other", + creator: "u/other", + title: "Private", + }); + + const response = request.get(`${baseUrl}/col/read?client=client1&id=c/privateColl`, { + json: true, + }); + + expect(response.status).to.not.equal(500); + expect(response.json).to.have.property("error"); + }); + + it("should add an item to a collection", () => { + // + // --- FIXTURE --- + // + + // Parent collection + db.c.save({ + _key: "wpParent", + owner: "u/client1", + creator: "u/client1", + title: "Parent", + }); + + // Owner edge + db.owner.save({ + _from: "c/wpParent", + _to: "u/client1", + }); + + // Item to add + db.c.save({ + _key: "wpChild", + owner: "u/client1", + creator: "u/client1", + title: "Child", + }); + + // Owner edge for child (required because write route checks owners) + db.owner.save({ + _from: "c/wpChild", + _to: "u/client1", + }); + + // + // --- CALL /write (ADD) --- + // + const response = request.get( + `${baseUrl}/col/write?client=client1&id=c/wpParent&add[]=c/wpChild`, + { json: true }, + ); + + // + // --- ASSERTIONS --- + // + 
expect(response.status).to.equal(200); + + // Should return empty array because no "loose" items + expect(response.json).to.be.an("array").that.is.empty; + }); + + it("should move an item between collections", () => { + // --- FIXTURE --- + // Source collection + db.c.save({ _key: "srcColl", owner: "u/client1", creator: "u/client1", title: "Source" }); + db.owner.save({ _from: "c/srcColl", _to: "u/client1" }); + + // Destination collection + db.c.save({ + _key: "dstColl", + owner: "u/client1", + creator: "u/client1", + title: "Destination", + }); + db.owner.save({ _from: "c/dstColl", _to: "u/client1" }); + + // Item to move + db.c.save({ _key: "item1", owner: "u/client1", creator: "u/client1", title: "Item" }); + db.owner.save({ _from: "c/item1", _to: "u/client1" }); + + // Link item to source collection (required by /move) + db.item.save({ _from: "c/srcColl", _to: "c/item1" }); + + // --- CALL /move --- + const response = request.get( + `${baseUrl}/col/move?client=client1&source=c/srcColl&dest=c/dstColl&items[]=c/item1`, + { json: true }, + ); + + // --- ASSERTIONS --- + expect(response.status).to.equal(200); + expect(response.json).to.deep.equal({}); // /move returns empty object + }); + + it("should return parent collections for an item", () => { + // --- FIXTURE --- + // Parent collection + db.c.save({ + _key: "parentColl", + owner: "u/client1", + creator: "u/client1", + title: "Parent", + }); + db.owner.save({ _from: "c/parentColl", _to: "u/client1" }); + + // Child item + db.d.save({ _key: "childItem", owner: "u/client1", creator: "u/client1", title: "Child" }); + db.owner.save({ _from: "d/childItem", _to: "u/client1" }); + + // Link child to parent + db.item.save({ _from: "c/parentColl", _to: "d/childItem" }); + + // --- CALL /get_parents --- + const response = request.get(`${baseUrl}/col/get_parents?client=client1&id=d/childItem`, { + json: true, + }); + + // --- ASSERTIONS --- + expect(response.status).to.equal(200); + 
expect(response.json).to.be.an("array"); + expect(response.json[0][0]).to.have.property("id", "c/parentColl"); + expect(response.json[0][0]).to.have.property("title", "Parent"); + }); + + it("should include the child item if inclusive=true", () => { + db.c.save({ + _key: "parentColl", + owner: "u/client1", + creator: "u/client1", + title: "Parent", + }); + db.owner.save({ _from: "c/parentColl", _to: "u/client1" }); + + db.d.save({ _key: "childItem", owner: "u/client1", creator: "u/client1", title: "Child" }); + db.owner.save({ _from: "d/childItem", _to: "u/client1" }); + + db.item.save({ _from: "c/parentColl", _to: "d/childItem" }); + const response = request.get( + `${baseUrl}/col/get_parents?client=client1&id=d/childItem&inclusive=true`, + { json: true }, + ); + + expect(response.status).to.equal(200); + expect(response.json).to.be.an("array"); + // The first element of the first path should be the child itself + expect(response.json[0][0]).to.have.property("id", "d/childItem"); + expect(response.json[0][0]).to.have.property("title", "Child"); + }); + + it("should return the correct offset of an item in a collection", () => { + // --- FIXTURE --- + const clientId = "client1"; + + // Parent collection + db.c.save({ + _key: "coll1", + owner: "u/client1", + creator: "u/client1", + title: "My Collection", + }); + db.owner.save({ _from: "c/coll1", _to: "u/client1" }); + + // Items in the collection + for (let i = 1; i <= 10; i++) { + const itemId = `d/item${i}`; + db.d.save({ + _key: `item${i}`, + owner: "u/client1", + creator: "u/client1", + title: `Item ${i}`, + }); + db.owner.save({ _from: itemId, _to: "u/client1" }); + db.item.save({ _from: "c/coll1", _to: itemId }); + } + + // --- CALL /get_offset --- + const pageSize = 3; + const targetItem = "d/item5"; + + const response = request.get( + `${baseUrl}/col/get_offset?client=${clientId}&id=c/coll1&item=${targetItem}&page_sz=${pageSize}`, + { json: true }, + ); + + expect(response.status).to.equal(200); + + // Items 
1-3 -> offset 0 + // Items 4-6 -> offset 3 + // Item 5 is in second page, offset should be 3 + expect(response.json).to.have.property("offset", 3); + }); + + it("should NOT crash if item is not found when getting offset", () => { + // --- FIXTURE --- + db.c.save({ + _key: "collOffsetFail", + owner: "u/client1", + creator: "u/client1", + title: "Offset Test", + }); + db.owner.save({ _from: "c/collOffsetFail", _to: "u/client1" }); + + // No items added at all + + const response = request.get( + `${baseUrl}/col/get_offset?client=client1&id=c/collOffsetFail&item=d/doesNotExist&page_sz=5`, + { json: true }, + ); + + // Should return a controlled error, NOT a 500 crash + expect(response.status).to.not.equal(500); + expect(response.json).to.have.property("error"); + }); + it("should NOT crash if page_sz is invalid in get_offset", () => { + db.c.save({ + _key: "collBadPage", + owner: "u/client1", + creator: "u/client1", + title: "Bad Page", + }); + db.owner.save({ _from: "c/collBadPage", _to: "u/client1" }); + + const response = request.get( + `${baseUrl}/col/get_offset?client=client1&id=c/collBadPage&item=d/item1&page_sz=0`, + { json: true }, + ); + + expect(response.status).to.not.equal(500); + }); + + it("should return a list of published collections for a client", () => { + const clientId = "client1"; + const userId = `u/${clientId}`; + + // --- Ensure collections exist --- + if (!db._collection("c")) db._createDocumentCollection("c"); + if (!db._collection("u")) db._createDocumentCollection("u"); + if (!db._collection("owner")) db._createEdgeCollection("owner"); + + // --- Ensure test user exists --- + let userDoc = db.u.firstExample({ _key: clientId }); + if (!userDoc) { + userDoc = db.u.save({ _key: clientId, name: "Client One" }); + } + + // --- Clean up previous test data --- + db.c.truncate(); + db.owner.truncate(); + + // --- Create some published collections --- + const publishedColls = [ + { + _key: "pub1", + owner: userDoc._id, + creator: userDoc._id, + 
title: "Alpha", + public: true, + }, + { _key: "pub2", owner: userDoc._id, creator: userDoc._id, title: "Beta", public: true }, + { + _key: "pub3", + owner: userDoc._id, + creator: userDoc._id, + title: "Gamma", + public: true, + }, + ]; + + publishedColls.forEach((c) => { + const collDoc = db.c.save(c); // Save collection + db.owner.save({ _from: collDoc._id, _to: userDoc._id }); // Edge must use real _id + }); + + // --- CALL /published/list without pagination --- + let response = request.get(`${baseUrl}/col/published/list?client=${clientId}`, { + json: true, + }); + expect(response.status).to.equal(200); + const titles = response.json.map((x) => x.title); + expect(titles).to.include.members(["Alpha", "Beta", "Gamma"]); + + // --- CALL /published/list with pagination --- + const offset = 1; + const count = 2; + response = request.get( + `${baseUrl}/col/published/list?client=${clientId}&offset=${offset}&count=${count}`, + { json: true }, + ); + expect(response.status).to.equal(200); + const paged = response.json; + const pagingInfo = paged.pop().paging; + expect(pagingInfo).to.deep.equal({ off: offset, cnt: count, tot: 3 }); + }); +}); From 957db8e86cf902be4d8daf10b8ca5a0440e04537 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 21 Jan 2026 11:31:43 -0500 Subject: [PATCH 23/65] [DAPS-1522] - refactor: foxx, query_router logging improvements, small fixes (#1820) --- core/database/foxx/api/query_router.js | 71 +++++++++---------- core/database/foxx/tests/query_router.test.js | 51 ++++++++++++- 2 files changed, 84 insertions(+), 38 deletions(-) diff --git a/core/database/foxx/api/query_router.js b/core/database/foxx/api/query_router.js index 30de93454..06c48a832 100644 --- a/core/database/foxx/api/query_router.js +++ b/core/database/foxx/api/query_router.js @@ -17,7 +17,6 @@ module.exports = router; router .post("/create", function (req, res) { - let client = undefined; let result = undefined; try { 
g_db._executeTransaction({ @@ -26,9 +25,9 @@ router write: ["q", "owner"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/create", @@ -93,7 +92,7 @@ router res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/create", @@ -103,7 +102,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/create", @@ -136,7 +135,6 @@ router router .post("/update", function (req, res) { - let client = undefined; let result = undefined; try { g_db._executeTransaction({ @@ -145,9 +143,9 @@ router write: ["q", "owner"], }, action: function () { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/update", @@ -200,7 +198,7 @@ router }); res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/update", @@ -210,7 +208,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/update", @@ -243,12 +241,12 @@ router router .get("/view", function (req, res) { - let client = undefined; let qry = undefined; + let 
client = null; try { client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", @@ -273,7 +271,7 @@ router res.send(qry); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", @@ -283,7 +281,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/view", @@ -303,12 +301,11 @@ router router .get("/delete", function (req, res) { - let client = undefined; try { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); var owner; logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/delete", @@ -340,7 +337,7 @@ router g_graph.q.remove(owner._from); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/delete", @@ -351,7 +348,7 @@ router } } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/delete", @@ -371,12 +368,11 @@ router router .get("/list", function (req, res) { - let client = undefined; let result = undefined; try { - client = g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], 
httpVerb: "GET", routePath: basePath + "/list", @@ -418,7 +414,7 @@ router res.send(result); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list", @@ -431,7 +427,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list", @@ -651,12 +647,11 @@ function execQuery(client, mode, published, orig_query) { router .get("/exec", function (req, res) { - let client = undefined; let results = undefined; try { - client = g_lib.getUserFromClientID(req.queryParams.client); + let client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/exec", @@ -679,7 +674,7 @@ router res.send(results); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/exec", @@ -689,7 +684,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/exec", @@ -711,11 +706,9 @@ router router .post("/exec/direct", function (req, res) { let results = undefined; - let client = undefined; try { - client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); logger.logRequestStarted({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/exec/direct", @@ -723,6 +716,8 @@ router description: "Execute published data search query", }); + let client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + const 
query = { ...req.body, params: JSON.parse(req.body.params), @@ -731,23 +726,27 @@ router res.send(results); logger.logRequestSuccess({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/exec/direct", status: "Success", description: "Execute published data search query", - extra: results, + extra: { + count: Array.isArray(results) ? results.length : undefined, + }, }); } catch (e) { logger.logRequestFailure({ - client: client?._id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "POST", routePath: basePath + "/exec/direct", status: "Failure", description: "Execute published data search query", - extra: results, + extra: { + count: Array.isArray(results) ? results.length : undefined, + }, error: e, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/tests/query_router.test.js b/core/database/foxx/tests/query_router.test.js index b075e6790..97ad2b52b 100644 --- a/core/database/foxx/tests/query_router.test.js +++ b/core/database/foxx/tests/query_router.test.js @@ -14,7 +14,7 @@ const qry_base_url = `${baseUrl}/qry`; describe("unit_query_router: the Foxx microservice qry_router endpoints", () => { after(function () { - const collections = ["u", "qry"]; + const collections = ["u", "qry", "c", "note", "fake"]; collections.forEach((name) => { let col = db._collection(name); if (col) col.truncate(); @@ -22,7 +22,7 @@ describe("unit_query_router: the Foxx microservice qry_router endpoints", () => }); beforeEach(() => { - const collections = ["u", "qry"]; + const collections = ["u", "qry", "c", "note", "fake"]; collections.forEach((name) => { let col = db._collection(name); if (col) { @@ -163,4 +163,51 @@ describe("unit_query_router: the Foxx microservice qry_router endpoints", () => expect(parsed).to.be.an("array"); expect(parsed.length).to.be.greaterThan(0); }); + + it("should execute a query directly", () => { + // 
arrange + const fakeUser = { + _key: "fakeUser", + _id: "u/fakeUser", + name: "Fake User", + email: "fakeuser@datadev.org", + is_admin: true, + max_coll: 5, + max_proj: 5, + max_sav_qry: 10, + }; + + const fakeCol = { + _key: "fakeCol", + _id: "col/fakeCol", + title: "fakeCol", + desc: "This is a fake col", + }; + + db.u.save(fakeUser); + + // Save the query and the edge between the query and the user + var request_string = `${qry_base_url}/exec/direct?client=u/fakeUser&owner=u/fakeUser&cols=c/fakeCol&cnt=1&off=0`; + var body = { + qry_begin: "FOR i in fake filter i.owner == @owner ", + qry_end: " sort @off,@cnt RETURN distinct i", + qry_filter: "", + params: '{ "cnt": 1, "off": 0, "owner": "u/fakeUser"}', + limit: 10, + mode: 1, + published: false, + }; + + // act + var response = request.post(request_string, { + json: true, + body: body, + headers: { + "x-correlation-id": "test-correlation-id", + }, + }); + + // Assert + expect(response.status).to.equal(200); + }); }); From d2a253ea906e1eee6cb428c10b3649340185db38 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 28 Jan 2026 14:50:44 -0500 Subject: [PATCH 24/65] [DAPS-1833] - refactor: test_api_record.py add env vars for specifying endpoint and file in end to end test. 
(#1834) --- tests/end-to-end/test_api_record.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/end-to-end/test_api_record.py b/tests/end-to-end/test_api_record.py index 71fae9999..296975e36 100755 --- a/tests/end-to-end/test_api_record.py +++ b/tests/end-to-end/test_api_record.py @@ -176,8 +176,13 @@ def test_record_create_delete(self): # May not work depending on traffic esnet_uuid = "ece400da-0182-4777-91d6-27a1808f8371" + endpoint_uuid = os.environ.get('DATAFED_TEST_GLOBUS_ENDPOINT', esnet_uuid) + test_file_path_on_endpoint = os.environ.get('DATAFED_TEST_GLOBUS_ENDPOINT_FILE', "/1M.dat") - put_task = self._df_api.dataPut(new_alias, esnet_uuid + "/1M.dat") + if not test_file_path_on_endpoint.startswith("/"): + test_file_path_on_endpoint = "/" + test_file_path_on_endpoint + + put_task = self._df_api.dataPut(new_alias, endpoint_uuid + test_file_path_on_endpoint) task_id = put_task[0].task.id From 6f4692e3f58ed8a323ef982ca70b146591219340 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 16 Jan 2026 14:02:00 -0500 Subject: [PATCH 25/65] fixed typo --- web/datafed-ws.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index d4373c707..e452d4bc6 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -624,7 +624,7 @@ the registration page. 
// Note: context/optional params for arbitrary input const token_context = { // passed values are mutable - resource_server: client_token.data.resource_sever, + resource_server: client_token.data.resource_server, collection_id: a_req.session.collection_id, scope: xfr_token.scope, }; From 19a73b1900a5c07b854f62062069dac373076fa6 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 04:19:35 -0500 Subject: [PATCH 26/65] fix(auth): resolve infinite consent loop and improve scope handling --- core/database/foxx/api/user_router.js | 14 ++++++++++---- web/services/auth/ConsentHandler.js | 8 ++++++-- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 2a33a984d..249eff6d5 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -901,10 +901,16 @@ router other_token_data, ); // TODO: the call site and function and docs will need to be updated if changes are made to assumed data // GLOBUS_TRANSFER parse currently assumes uuid and scopes exist - let globus_collection = g_db.globus_coll.exists({ - _key: collection_search_key, - }); - if (!globus_collection) { + let globus_collection; + if ( + g_db.globus_coll.exists({ + _key: collection_search_key, + }) + ) { + globus_collection = g_db.globus_coll.document({ + _key: collection_search_key, + }); + } else { globus_collection = g_db.globus_coll.save({ _key: collection_search_key, name: "Newly Inserted Collection", diff --git a/web/services/auth/ConsentHandler.js b/web/services/auth/ConsentHandler.js index 9cee4b3ae..e80f0e81e 100644 --- a/web/services/auth/ConsentHandler.js +++ b/web/services/auth/ConsentHandler.js @@ -24,9 +24,13 @@ export const generateConsentURL = ( query_params, state, ) => { - const scopes = requested_scopes || ["openid", "profile", "email", "urn:globus:auth:scope:transfer.api.globus.org:all"]; + const scopes = Array.isArray(requested_scopes) + ? 
requested_scopes + : typeof requested_scopes === "string" + ? [requested_scopes] + : ["openid", "profile", "email", "urn:globus:auth:scope:transfer.api.globus.org:all"]; - if (refresh_tokens) { + if (refresh_tokens && !scopes.includes("offline_access")) { scopes.push("offline_access"); } From 0fb8771004f45d4811b457fd3c82d8c7a127639b Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 05:07:07 -0500 Subject: [PATCH 27/65] fix(auth): resolve infinite consent loop via db, race condition, and scope fixes --- core/database/foxx/api/user_router.js | 2 +- web/datafed-ws.js | 66 +++++++++++++++++++-------- 2 files changed, 49 insertions(+), 19 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 249eff6d5..01d2128e5 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -931,7 +931,7 @@ router _from: user_id, // the uid field _to: globus_collection._id, type: token_type, - dependent_scopes: scopes, + dependent_scopes: scopes || globus_collection.required_scopes, request_time: Math.floor(Date.now() / 1000), last_used: Math.floor(Date.now() / 1000), status: diff --git a/web/datafed-ws.js b/web/datafed-ws.js index e452d4bc6..d07be3582 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -641,15 +641,26 @@ the registration page. 
xfr_token.refresh_token, xfr_token.expires_in, optional_data, + (err) => { + if (err) { + redirect_path = "/ui/error"; + logger.error( + "/ui/authn", + getCurrentLineNumber(), + err, + ); + delete a_req.session.collection_id; + } + // TODO Account may be disable from SDMS (active = false) + a_resp.redirect(redirect_path); + }, ); } catch (err) { redirect_path = "/ui/error"; logger.error("/ui/authn", getCurrentLineNumber(), err); delete a_req.session.collection_id; + a_resp.redirect(redirect_path); } - - // TODO Account may be disable from SDMS (active = false) - a_resp.redirect(redirect_path); } }, ); @@ -751,25 +762,32 @@ app.get("/api/usr/register", (a_req, a_resp) => { a_req.session.acc_tok, a_req.session.ref_tok, a_req.session.acc_tok_ttl, + {}, + (err) => { + if (err) { + logger.error("/api/usr/register", getCurrentLineNumber(), err); + a_resp.status(500).send("Registration failed during token set"); + return; + } + // Remove data not needed for active session + delete a_req.session.name; + delete a_req.session.email; + delete a_req.session.uuids; + delete a_req.session.acc_tok; + delete a_req.session.acc_tok_ttl; + delete a_req.session.ref_tok; + delete a_req.session.uuids; + + // Set session as registered user + a_req.session.reg = true; + + a_resp.send(reply); + }, ); } catch (err) { logger.error("/api/usr/register", getCurrentLineNumber(), err); throw err; - } finally { - // Remove data not needed for active session - delete a_req.session.name; - delete a_req.session.email; - delete a_req.session.uuids; - delete a_req.session.acc_tok; - delete a_req.session.acc_tok_ttl; - delete a_req.session.ref_tok; - delete a_req.session.uuids; } - - // Set session as registered user - a_req.session.reg = true; - - a_resp.send(reply); } }, ); @@ -2005,7 +2023,14 @@ app.get("/ui/theme/save", (a_req, a_resp) => { * * @throws Error - When a reply is not received from sendMessageDirect */ -function setAccessToken(a_uid, a_acc_tok, a_ref_tok, a_expires_sec, 
token_optional_params = {}) { +function setAccessToken( + a_uid, + a_acc_tok, + a_ref_tok, + a_expires_sec, + token_optional_params = {}, + a_cb = null, +) { logger.info( setAccessToken.name, getCurrentLineNumber(), @@ -2019,8 +2044,13 @@ function setAccessToken(a_uid, a_acc_tok, a_ref_tok, a_expires_sec, token_option // Should be an AckReply if (!reply) { logger.error("setAccessToken", getCurrentLineNumber(), "failed."); + if (a_cb) { + a_cb(new Error("setAccessToken failed")); + return; + } throw new Error("setAccessToken failed"); } + if (a_cb) a_cb(null, reply); }); } From 7df3ff9e92229cbe7bb959ec6f4befb91f6ee895 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 05:42:39 -0500 Subject: [PATCH 28/65] added debug information --- core/database/foxx/api/user_router.js | 54 ++++++++++++++++++- web/datafed-ws.js | 75 ++++++++++++++++++++++++++- 2 files changed, 126 insertions(+), 3 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 01d2128e5..49152ac23 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -902,16 +902,32 @@ router ); // TODO: the call site and function and docs will need to be updated if changes are made to assumed data // GLOBUS_TRANSFER parse currently assumes uuid and scopes exist let globus_collection; + + console.log( + "DEBUG: GLOBUS_TRANSFER - Starting token set. Collection Search Key:", + collection_search_key, + "Scopes:", + scopes, + ); + if ( g_db.globus_coll.exists({ _key: collection_search_key, }) ) { + console.log( + "DEBUG: Collection exists. Fetching document for key:", + collection_search_key, + ); globus_collection = g_db.globus_coll.document({ _key: collection_search_key, }); } else { - globus_collection = g_db.globus_coll.save({ + console.log( + "DEBUG: Collection does not exist. 
Creating new collection for key:", + collection_search_key, + ); + const meta = g_db.globus_coll.save({ _key: collection_search_key, name: "Newly Inserted Collection", description: "The collection description", @@ -922,7 +938,43 @@ router type: "mapped", // mapped/guest TODO: to be pulled from token data on follow-up ticket ha_enabled: false, // boolean - TODO: to be pulled from token data on follow-up ticket }); + console.log("DEBUG: Collection created. Metadata:", meta); + globus_collection = g_db.globus_coll.document(meta); } + + console.log( + "DEBUG: Globus Collection Object:", + globus_collection, + ); + + const token_key = + globus_collection._key + "_" + token_type + "_" + user_doc._key; + + const dependent_scopes_val = scopes || globus_collection.required_scopes; + console.log("DEBUG: Calculated dependent_scopes:", dependent_scopes_val); + + const token_doc = { + _key: token_key, + _from: user_id, + _to: globus_collection._id, + type: token_type, + dependent_scopes: dependent_scopes_val, + request_time: Math.floor(Date.now() / 1000), + last_used: Math.floor(Date.now() / 1000), + status: + obj.expiration > Math.floor(Date.now() / 1000) + ? 
"active" + : "inactive", + ...obj, + }; + + console.log("DEBUG: Saving Token Document:", token_doc); + + const token_doc_upsert = g_db.globus_token.insert(token_doc, { + overwriteMode: "replace", + }); + console.log("DEBUG: Token Insert Result:", token_doc_upsert); + break; const token_key = globus_collection._key + "_" + token_type + "_" + user_doc._key; diff --git a/web/datafed-ws.js b/web/datafed-ws.js index d07be3582..1ef19bdc0 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -312,8 +312,33 @@ function storeCollectionId(req, res, next) { req.session.collection_id = req.query.collection_id; // TODO: assuming collection is specifically mapped and not HA/other variants req.session.collection_type = "mapped"; + logger.info( + "storeCollectionId", + getCurrentLineNumber(), + "DEBUG: Storing Collection ID: " + + req.query.collection_id + + " to session.", + ); + req.session.save((err) => { + if (err) { + logger.error( + "storeCollectionId", + getCurrentLineNumber(), + "DEBUG: Session save error:", + err, + ); + } else { + logger.info( + "storeCollectionId", + getCurrentLineNumber(), + "DEBUG: Session saved successfully.", + ); + } + next(); + }); + } else { + next(); } - next(); } app.use(cookieParser(g_session_secret)); @@ -619,6 +644,15 @@ the registration page. a_req.session.uid = username; a_req.session.reg = true; + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: Session Updated. UID: " + + username + + ", CollectionID: " + + a_req.session.collection_id, + ); + let redirect_path = "/ui/main"; // Note: context/optional params for arbitrary input @@ -628,13 +662,29 @@ the registration page. 
collection_id: a_req.session.collection_id, scope: xfr_token.scope, }; + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: Token Context Constructed: " + JSON.stringify(token_context), + ); try { const optional_data = token_handler.constructOptionalData(token_context); + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: Optional Data Constructed: " + JSON.stringify(optional_data), + ); + // Refresh Globus access & refresh tokens to Core/DB // NOTE: core services seem entirely in charge of refreshing tokens once they are set (ClientWorker.cpp). // This should only be triggered when new tokens are coming in, like when a token expires or a transfer token is created. + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: Calling setAccessToken...", + ); setAccessToken( a_req.session.uid, xfr_token.access_token, @@ -647,14 +697,35 @@ the registration page. logger.error( "/ui/authn", getCurrentLineNumber(), - err, + "DEBUG: setAccessToken Failed: " + err, ); delete a_req.session.collection_id; + } else { + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: setAccessToken Success. 
Redirecting to " + + redirect_path, + ); } // TODO Account may be disable from SDMS (active = false) a_resp.redirect(redirect_path); }, ); + } catch (err) { + redirect_path = "/ui/error"; + logger.error( + "/ui/authn", + getCurrentLineNumber(), + "DEBUG: Exception in token handling: " + err, + ); + delete a_req.session.collection_id; + a_resp.redirect(redirect_path); + } + // TODO Account may be disable from SDMS (active = false) + a_resp.redirect(redirect_path); + }, + ); } catch (err) { redirect_path = "/ui/error"; logger.error("/ui/authn", getCurrentLineNumber(), err); From 662d51809ddb62348becc9fe0fb455a3a0d81588 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 05:57:16 -0500 Subject: [PATCH 29/65] removed duplicated code --- core/database/foxx/api/user_router.js | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 49152ac23..534fd4496 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -976,27 +976,6 @@ router console.log("DEBUG: Token Insert Result:", token_doc_upsert); break; - const token_key = - globus_collection._key + "_" + token_type + "_" + user_doc._key; - const token_doc = { - _key: token_key, - _from: user_id, // the uid field - _to: globus_collection._id, - type: token_type, - dependent_scopes: scopes || globus_collection.required_scopes, - request_time: Math.floor(Date.now() / 1000), - last_used: Math.floor(Date.now() / 1000), - status: - obj.expiration > Math.floor(Date.now() / 1000) - ? "active" - : "inactive", - ...obj, - }; - - const token_doc_upsert = g_db.globus_token.insert(token_doc, { - overwriteMode: "replace", // TODO: perhaps use 'update' and specify values for true upsert. 
- }); - break; } case g_lib.AccessTokenType.GLOBUS_DEFAULT: { // Existing logic, default actions From a71c1296237070927c97372926f49d9e30f7d09a Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 06:01:02 -0500 Subject: [PATCH 30/65] removed error --- web/datafed-ws.js | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 1ef19bdc0..8bf9237e0 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -315,9 +315,7 @@ function storeCollectionId(req, res, next) { logger.info( "storeCollectionId", getCurrentLineNumber(), - "DEBUG: Storing Collection ID: " + - req.query.collection_id + - " to session.", + "DEBUG: Storing Collection ID: " + req.query.collection_id + " to session.", ); req.session.save((err) => { if (err) { @@ -665,7 +663,8 @@ the registration page. logger.info( "/ui/authn", getCurrentLineNumber(), - "DEBUG: Token Context Constructed: " + JSON.stringify(token_context), + "DEBUG: Token Context Constructed: " + + JSON.stringify(token_context), ); try { const optional_data = @@ -674,7 +673,8 @@ the registration page. logger.info( "/ui/authn", getCurrentLineNumber(), - "DEBUG: Optional Data Constructed: " + JSON.stringify(optional_data), + "DEBUG: Optional Data Constructed: " + + JSON.stringify(optional_data), ); // Refresh Globus access & refresh tokens to Core/DB @@ -721,16 +721,6 @@ the registration page. 
); delete a_req.session.collection_id; a_resp.redirect(redirect_path); - } - // TODO Account may be disable from SDMS (active = false) - a_resp.redirect(redirect_path); - }, - ); - } catch (err) { - redirect_path = "/ui/error"; - logger.error("/ui/authn", getCurrentLineNumber(), err); - delete a_req.session.collection_id; - a_resp.redirect(redirect_path); } } }, From 30345c683d9210f82852239519ae15801240d158 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 11:01:32 +0000 Subject: [PATCH 31/65] chore: Auto-format JavaScript files with Prettier --- core/database/foxx/api/user_router.js | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 534fd4496..52bd8435b 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -941,17 +941,18 @@ router console.log("DEBUG: Collection created. Metadata:", meta); globus_collection = g_db.globus_coll.document(meta); } - - console.log( - "DEBUG: Globus Collection Object:", - globus_collection, - ); + + console.log("DEBUG: Globus Collection Object:", globus_collection); const token_key = globus_collection._key + "_" + token_type + "_" + user_doc._key; - - const dependent_scopes_val = scopes || globus_collection.required_scopes; - console.log("DEBUG: Calculated dependent_scopes:", dependent_scopes_val); + + const dependent_scopes_val = + scopes || globus_collection.required_scopes; + console.log( + "DEBUG: Calculated dependent_scopes:", + dependent_scopes_val, + ); const token_doc = { _key: token_key, @@ -975,7 +976,6 @@ router }); console.log("DEBUG: Token Insert Result:", token_doc_upsert); break; - } case g_lib.AccessTokenType.GLOBUS_DEFAULT: { // Existing logic, default actions From 0e1c78d641d892fbbf732998e014b5b4341d8253 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 20 Jan 2026 06:20:09 -0500 Subject: [PATCH 32/65] try client fix --- 
core/database/foxx/api/support.js | 4 ++++ core/database/foxx/api/user_router.js | 14 ++++++++------ web/datafed-ws.js | 10 ++++++++++ 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/core/database/foxx/api/support.js b/core/database/foxx/api/support.js index 8f7ec612e..19b99202f 100644 --- a/core/database/foxx/api/support.js +++ b/core/database/foxx/api/support.js @@ -623,6 +623,10 @@ module.exports = (function () { // Client ID can be an SDMS uname (xxxxx...), a UUID (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx), or an account (domain.uname) // UUID are defined by length and format, accounts have a "." (and known domains), SDMS unames have no "." or "-" characters + if (!a_client_id) { + throw [error.ERR_INVALID_PARAM, "Client ID must be provided"]; + } + var params; console.log("getUserFromClient id: ", a_client_id); diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 52bd8435b..4e0ced69a 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -1890,10 +1890,9 @@ router router .get("/ep/get", function (req, res) { + let client = null; let first = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); - first = client?.eps.length ? client?.eps[0] : undefined; logger.logRequestStarted({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1902,6 +1901,8 @@ router status: "Started", description: "Get recent end-points", }); + client = g_lib.getUserFromClientID(req.queryParams.client); + first = client.eps && client.eps.length ? client.eps[0] : undefined; res.send(client.eps ? client.eps : []); logger.logRequestSuccess({ @@ -1911,7 +1912,7 @@ router routePath: basePath + "/ep/get", status: "Success", description: "Get recent end-points", - extra: { most_recent: first, count: client?.eps?.length }, + extra: { most_recent: first, count: client.eps ? 
client.eps.length : 0 }, }); } catch (e) { logger.logRequestFailure({ @@ -1921,7 +1922,7 @@ router routePath: basePath + "/ep/get", status: "Failure", description: "Get recent end-points", - extra: { most_recent: first, count: client?.eps?.length }, + extra: { most_recent: first, count: client && client.eps ? client.eps.length : 0 }, error: e, }); g_lib.handleException(e, res); @@ -1933,8 +1934,8 @@ router router .get("/ep/set", function (req, res) { + let client = null; try { - const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1943,6 +1944,7 @@ router status: "Started", description: "Set recent end-points", }); + client = g_lib.getUserFromClientID(req.queryParams.client); g_db._update( client._id, { @@ -1969,7 +1971,7 @@ router routePath: basePath + "/ep/set", status: "Failure", description: "Set recent end-points", - extra: client.eps, + extra: client && client.eps ? client.eps : undefined, error: e, }); g_lib.handleException(e, res); diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 8bf9237e0..7056ac51c 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -623,6 +623,16 @@ the registration page. 
); } let username = reply.user[0]?.uid?.replace(/^u\//, ""); + if (!username) { + logger.error( + "/ui/authn", + getCurrentLineNumber(), + "Error: User identity found but UID is missing or invalid.", + reply.user, + ); + a_resp.redirect("/ui/error"); + return; + } logger.info( "/ui/authn", getCurrentLineNumber(), From 5a6d17ec4aaab4f057c7442727bc33a7775f7139 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 12:02:49 -0500 Subject: [PATCH 33/65] added handler for globus default token --- .gitignore | 2 +- web/services/auth/TokenHandler.js | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b5450c6c5..468b4bfa3 100644 --- a/.gitignore +++ b/.gitignore @@ -52,7 +52,7 @@ scripts/globus/mapping.json scripts/admin_datafed_backup.sh scripts/admin_refresh_certs.sh scripts/globus/__pycache__ -services/ +/services/ tests/mock_core/Version.hpp tmp web/SDMS.proto diff --git a/web/services/auth/TokenHandler.js b/web/services/auth/TokenHandler.js index ac678be3e..370b6181d 100644 --- a/web/services/auth/TokenHandler.js +++ b/web/services/auth/TokenHandler.js @@ -167,6 +167,14 @@ export default class OAuthTokenHandler { break; } case AccessTokenType.GLOBUS_DEFAULT: { + if (token_context.collection_id) { + const { collection_id, scope } = token_context; + if (!scope) { + throw new Error("Transfer token received without scope context"); + } + optional_data.type = AccessTokenType.GLOBUS_TRANSFER; + optional_data.other = collection_id + "|" + scope; + } break; } default: { From e214ba0a5a3077906417e79736ef43f6c06962cf Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 12:20:02 -0500 Subject: [PATCH 34/65] single domain req --- web/datafed-ws.js | 14 +++++++++++++- .../components/endpoint-browse/index.js | 19 +++++++++++++++++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 7056ac51c..1de280b5a 100755 --- a/web/datafed-ws.js +++ 
b/web/datafed-ws.js @@ -1665,7 +1665,19 @@ app.post("/api/cat/search", (a_req, a_resp) => { }); app.get("/api/globus/consent_url", storeCollectionId, (a_req, a_resp) => { - const { requested_scopes, state, refresh_tokens, query_params } = a_req.query; + let { requested_scopes, state, refresh_tokens, query_params } = a_req.query; + + if (typeof query_params === "string") { + try { + query_params = JSON.parse(query_params); + } catch (e) { + logger.error( + "/api/globus/consent_url", + getCurrentLineNumber(), + "Failed to parse query_params: " + e, + ); + } + } const consent_url = generateConsentURL( g_oauth_credentials.clientId, diff --git a/web/static/components/endpoint-browse/index.js b/web/static/components/endpoint-browse/index.js index 17910b2ce..38008b3a0 100644 --- a/web/static/components/endpoint-browse/index.js +++ b/web/static/components/endpoint-browse/index.js @@ -302,15 +302,30 @@ class EndpointBrowser { let title; // Generate consent URL for consent required errors if (error instanceof ApiError) { - if (error.code === "ConsentRequired" || error.data?.needs_consent === true) { + // Check for explicit consent requirement OR permission denied with auth parameters (530) + if ( + error.code === "ConsentRequired" || + error.data?.needs_consent === true || + (error.data?.code === "permission_denied" && + error.data?.authorization_parameters?.session_required_single_domain) + ) { const data = await new Promise((resolve) => { + // Extract query params from authorization_parameters if available + const queryParams = {}; + if (error.data?.authorization_parameters?.session_required_single_domain) { + queryParams.session_required_single_domain = + error.data.authorization_parameters.session_required_single_domain; + } + api.getGlobusConsentURL( (_, data) => resolve(data), this.props.endpoint.id, error.data.required_scopes, + false, // refresh_tokens + queryParams, ); }); - title = `Consent Required: Please provide consent.`; + title = `Consent/Login Required: 
Please login with required identity.`; } else { title = `Error: ${ error.data.message || "Unknown API error" From f424480f4968d2423924ca23696317762564691e Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 12:33:40 -0500 Subject: [PATCH 35/65] added state restore feature --- web/datafed-ws.js | 34 +++++++++++++++++++ .../components/endpoint-browse/index.js | 10 ++++++ web/static/main.js | 10 ++++++ web/views/main.ect | 3 +- 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 1de280b5a..5ce28870f 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -412,12 +412,21 @@ app.get("/ui/main", (a_req, a_resp) => { const nonce = crypto.randomBytes(16).toString("base64"); a_resp.locals.nonce = nonce; a_resp.setHeader("Content-Security-Policy", `script-src 'nonce-${nonce}'`); + + // Extract restore_state from session if present + let restore_state = null; + if (a_req.session.restore_state) { + restore_state = JSON.stringify(a_req.session.restore_state); + delete a_req.session.restore_state; + } + a_resp.render("main", { nonce: a_resp.locals.nonce, user_uid: a_req.session.uid, theme: theme, version: g_version, test_mode: g_test, + restore_state: restore_state, ...g_google_analytics, }); } else { @@ -652,6 +661,31 @@ the registration page. 
a_req.session.uid = username; a_req.session.reg = true; + if (a_req.query.state) { + try { + const state_obj = JSON.parse(a_req.query.state); + // Validate state structure to prevent arbitrary session pollution + if ( + state_obj.endpoint_browser || + state_obj.restore_state + ) { + a_req.session.restore_state = state_obj; + logger.info( + "/ui/authn", + getCurrentLineNumber(), + "Restorable state found and saved to session.", + ); + } + } catch (e) { + // State was not JSON or valid, ignore + logger.warning( + "/ui/authn", + getCurrentLineNumber(), + "Failed to parse state parameter: " + e, + ); + } + } + logger.info( "/ui/authn", getCurrentLineNumber(), diff --git a/web/static/components/endpoint-browse/index.js b/web/static/components/endpoint-browse/index.js index 38008b3a0..729b5b404 100644 --- a/web/static/components/endpoint-browse/index.js +++ b/web/static/components/endpoint-browse/index.js @@ -317,12 +317,22 @@ class EndpointBrowser { error.data.authorization_parameters.session_required_single_domain; } + // Serialize current state for restoration after consent flow + const state = JSON.stringify({ + endpoint_browser: { + endpoint: this.props.endpoint.rawData, + path: this.state.path, + mode: this.props.mode, + }, + }); + api.getGlobusConsentURL( (_, data) => resolve(data), this.props.endpoint.id, error.data.required_scopes, false, // refresh_tokens queryParams, + state, ); }); title = `Consent/Login Required: Please login with required identity.`; diff --git a/web/static/main.js b/web/static/main.js index 4bf2964bb..389513a04 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -61,6 +61,16 @@ $(document).ready(function () { browser_tab.init(); + // Restore state if present + if (tmpl_data.restore_state && tmpl_data.restore_state.endpoint_browser) { + import("/components/endpoint-browse/index.js").then((module) => { + const { endpoint, path, mode } = tmpl_data.restore_state.endpoint_browser; + module.show(endpoint, path, mode, (selectedPath) => { 
+ console.log("Restored selection:", selectedPath); + }); + }); + } + util.setStatusText("DataFed Ready"); } else { dialogs.dlgAlert("System Error", "Unable to access user record"); diff --git a/web/views/main.ect b/web/views/main.ect index 0368e5d79..35894c412 100644 --- a/web/views/main.ect +++ b/web/views/main.ect @@ -7,7 +7,8 @@ { "test_mode" : "<%- @test_mode %>", "user_uid" : "<%- @user_uid %>", - "theme" : "<%- @theme %>" + "theme" : "<%- @theme %>", + "restore_state" : <%- @restore_state || 'null' %> } From 760f4bafbfd8e0e3f813f302e3329f01f226d770 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 13:14:14 -0500 Subject: [PATCH 36/65] added upload data modal restore feature --- .../components/endpoint-browse/index.js | 24 +++++++++++++-- web/static/main.js | 30 ++++++++++++++----- 2 files changed, 45 insertions(+), 9 deletions(-) diff --git a/web/static/components/endpoint-browse/index.js b/web/static/components/endpoint-browse/index.js index 729b5b404..96150537a 100644 --- a/web/static/components/endpoint-browse/index.js +++ b/web/static/components/endpoint-browse/index.js @@ -318,13 +318,33 @@ class EndpointBrowser { } // Serialize current state for restoration after consent flow - const state = JSON.stringify({ + const stateObj = { endpoint_browser: { endpoint: this.props.endpoint.rawData, path: this.state.path, mode: this.props.mode, }, - }); + }; + + // Check if "New Data Record" dialog is open and save its state + const new_data_dlg = $("#d_new_edit"); + if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { + const metadata_editor = ace.edit(new_data_dlg.find("#md")[0]); + stateObj.parent_dialog = { + type: "d_new_edit", + mode: 0, // DLG_DATA_MODE_NEW + data: { + title: new_data_dlg.find("#title").val(), + alias: new_data_dlg.find("#alias").val(), + desc: new_data_dlg.find("#desc").val(), + metadata: metadata_editor ? 
metadata_editor.getValue() : "", + // Use parentId for the collection + parentId: new_data_dlg.find("#coll").val(), + }, + }; + } + + const state = JSON.stringify(stateObj); api.getGlobusConsentURL( (_, data) => resolve(data), diff --git a/web/static/main.js b/web/static/main.js index 389513a04..1f741e292 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -61,14 +61,30 @@ $(document).ready(function () { browser_tab.init(); - // Restore state if present - if (tmpl_data.restore_state && tmpl_data.restore_state.endpoint_browser) { - import("/components/endpoint-browse/index.js").then((module) => { - const { endpoint, path, mode } = tmpl_data.restore_state.endpoint_browser; - module.show(endpoint, path, mode, (selectedPath) => { - console.log("Restored selection:", selectedPath); + if (tmpl_data.restore_state) { + if ( + tmpl_data.restore_state.parent_dialog && + tmpl_data.restore_state.parent_dialog.type === "d_new_edit" + ) { + import("/dlg_data_new_edit.js").then((module) => { + const { mode, data } = tmpl_data.restore_state.parent_dialog; + module.show(mode, data, data.parentId); }); - }); + } + + if (tmpl_data.restore_state.endpoint_browser) { + import("/components/endpoint-browse/index.js").then((module) => { + const { endpoint, path, mode } = tmpl_data.restore_state.endpoint_browser; + module.show(endpoint, path, mode, (selectedPath) => { + const new_data_dlg = $("#d_new_edit"); + if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { + new_data_dlg.find("#source_file").val(selectedPath); + } else { + console.log("Restored selection:", selectedPath); + } + }); + }); + } } util.setStatusText("DataFed Ready"); From af34dfd066f5bc2ef1d7e97fc2d396a68b3220a3 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 13:27:18 -0500 Subject: [PATCH 37/65] (hopefully) fixed session restore feature --- .../components/endpoint-browse/index.js | 14 ++++++ .../transfer/transfer-ui-manager.js | 2 + web/static/dlg_data_new_edit.js | 2 +- 
web/static/main.js | 49 ++++++++++++++----- 4 files changed, 55 insertions(+), 12 deletions(-) diff --git a/web/static/components/endpoint-browse/index.js b/web/static/components/endpoint-browse/index.js index 96150537a..3cbd4c601 100644 --- a/web/static/components/endpoint-browse/index.js +++ b/web/static/components/endpoint-browse/index.js @@ -328,6 +328,8 @@ class EndpointBrowser { // Check if "New Data Record" dialog is open and save its state const new_data_dlg = $("#d_new_edit"); + const transfer_dlg_content = $("#records").closest(".ui-dialog-content"); + if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { const metadata_editor = ace.edit(new_data_dlg.find("#md")[0]); stateObj.parent_dialog = { @@ -342,6 +344,18 @@ class EndpointBrowser { parentId: new_data_dlg.find("#coll").val(), }, }; + } else if ( + transfer_dlg_content.length && + transfer_dlg_content.dialog("isOpen") + ) { + const controller = transfer_dlg_content.data("controller"); + if (controller) { + stateObj.parent_dialog = { + type: "transfer", + mode: controller.model.mode, + records: controller.ids, + }; + } } const state = JSON.stringify(stateObj); diff --git a/web/static/components/transfer/transfer-ui-manager.js b/web/static/components/transfer/transfer-ui-manager.js index d290b5af8..4d80f58ff 100644 --- a/web/static/components/transfer/transfer-ui-manager.js +++ b/web/static/components/transfer/transfer-ui-manager.js @@ -222,6 +222,8 @@ export class TransferUIManager { createDialog(labels) { this.state.frame = $(document.createElement("div")); this.state.frame.html(getDialogTemplate(labels, this.#controller.model.mode)); + // Attach controller to frame for state persistence + this.state.frame.data("controller", this.#controller); return this.state.frame; } diff --git a/web/static/dlg_data_new_edit.js b/web/static/dlg_data_new_edit.js index f354cf6f8..36a2ac07f 100644 --- a/web/static/dlg_data_new_edit.js +++ b/web/static/dlg_data_new_edit.js @@ -16,7 +16,7 @@ const 
DLG_DATA_BTN_LABEL = ["Create", "Update", "Create"]; export function show(a_mode, a_data, a_parent, a_upd_perms, a_cb) { var ele = document.createElement("div"); - ele.id = (a_data ? a_data.id.replace("/", "_") : "d_new") + "_edit"; + ele.id = (a_data && a_data.id ? a_data.id.replace("/", "_") : "d_new") + "_edit"; var frame = $(ele), dlg_inst, jsoned, diff --git a/web/static/main.js b/web/static/main.js index 1f741e292..4f6bdf7a4 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -62,23 +62,50 @@ $(document).ready(function () { browser_tab.init(); if (tmpl_data.restore_state) { - if ( - tmpl_data.restore_state.parent_dialog && - tmpl_data.restore_state.parent_dialog.type === "d_new_edit" - ) { - import("/dlg_data_new_edit.js").then((module) => { - const { mode, data } = tmpl_data.restore_state.parent_dialog; - module.show(mode, data, data.parentId); - }); + const parentState = tmpl_data.restore_state.parent_dialog; + + if (parentState) { + if (parentState.type === "d_new_edit") { + import("/dlg_data_new_edit.js").then((module) => { + const { mode, data } = parentState; + module.show(mode, data, data.parentId); + }); + } else if (parentState.type === "transfer") { + import("/components/transfer/index.js").then((module) => { + const { mode, records } = parentState; + // Re-open transfer dialog + module.transferDialog.show(mode, records, () => { + // Default callback if needed, usually this refreshes view + // but we might not have context. 
+ console.log("Restored transfer dialog completed"); + }); + }); + } } if (tmpl_data.restore_state.endpoint_browser) { import("/components/endpoint-browse/index.js").then((module) => { const { endpoint, path, mode } = tmpl_data.restore_state.endpoint_browser; module.show(endpoint, path, mode, (selectedPath) => { - const new_data_dlg = $("#d_new_edit"); - if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { - new_data_dlg.find("#source_file").val(selectedPath); + // Update parent dialog if open + if (parentState?.type === "d_new_edit") { + const new_data_dlg = $("#d_new_edit"); + if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { + new_data_dlg.find("#source_file").val(selectedPath); + } + } else if (parentState?.type === "transfer") { + // Update path in transfer dialog + const transfer_dlg_content = $("#records").closest( + ".ui-dialog-content", + ); + if ( + transfer_dlg_content.length && + transfer_dlg_content.dialog("isOpen") + ) { + transfer_dlg_content.find("#path").val(selectedPath); + // Trigger input event to update browse logic if needed + // But typically just setting value is enough for user confirmation + } } else { console.log("Restored selection:", selectedPath); } From 13cc3a4f6f44d0fbe6e46c864a1dacd40dedca9e Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 18:28:02 +0000 Subject: [PATCH 38/65] chore: Auto-format JavaScript files with Prettier --- web/static/main.js | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/web/static/main.js b/web/static/main.js index 4f6bdf7a4..24646c092 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -95,9 +95,8 @@ $(document).ready(function () { } } else if (parentState?.type === "transfer") { // Update path in transfer dialog - const transfer_dlg_content = $("#records").closest( - ".ui-dialog-content", - ); + const transfer_dlg_content = + $("#records").closest(".ui-dialog-content"); if ( transfer_dlg_content.length && 
transfer_dlg_content.dialog("isOpen") From 658dec899e5f20722e4db461f2273f645d2eff9f Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 13:36:41 -0500 Subject: [PATCH 39/65] dont overwrite path --- web/static/main.js | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/web/static/main.js b/web/static/main.js index 24646c092..b3ef865bd 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -101,13 +101,26 @@ $(document).ready(function () { transfer_dlg_content.length && transfer_dlg_content.dialog("isOpen") ) { - transfer_dlg_content.find("#path").val(selectedPath); - // Trigger input event to update browse logic if needed - // But typically just setting value is enough for user confirmation + const currentVal = transfer_dlg_content.find("#path").val(); + const prefix = + endpoint.name && + currentVal.startsWith(endpoint.name) + ? endpoint.name + : ""; + + let newVal = selectedPath; + if (prefix && !selectedPath.startsWith(prefix)) { + newVal = prefix + selectedPath; + } + + transfer_dlg_content.find("#path").val(newVal); } } else { console.log("Restored selection:", selectedPath); } + } else { + console.log("Restored selection:", selectedPath); + } }); }); } From 248d8b916d8faff383e7001ef7d91f8e5967f0a1 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 13:50:57 -0500 Subject: [PATCH 40/65] fix syntax error --- web/static/main.js | 1 - 1 file changed, 1 deletion(-) diff --git a/web/static/main.js b/web/static/main.js index b3ef865bd..d84395c42 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -117,7 +117,6 @@ $(document).ready(function () { } } else { console.log("Restored selection:", selectedPath); - } } else { console.log("Restored selection:", selectedPath); } From 428de95c2c779b618b68a29408e3d30ecd95a178 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 13:55:29 -0500 Subject: [PATCH 41/65] fix syntax error 2 --- web/static/main.js | 2 -- 1 file changed, 2 
deletions(-) diff --git a/web/static/main.js b/web/static/main.js index d84395c42..758e94178 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -117,8 +117,6 @@ $(document).ready(function () { } } else { console.log("Restored selection:", selectedPath); - } else { - console.log("Restored selection:", selectedPath); } }); }); From 26e06a88b44708b34a10addd21b3865392d8803f Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 18:56:18 +0000 Subject: [PATCH 42/65] chore: Auto-format JavaScript files with Prettier --- web/static/main.js | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/web/static/main.js b/web/static/main.js index 758e94178..0553f9ae9 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -103,16 +103,15 @@ $(document).ready(function () { ) { const currentVal = transfer_dlg_content.find("#path").val(); const prefix = - endpoint.name && - currentVal.startsWith(endpoint.name) + endpoint.name && currentVal.startsWith(endpoint.name) ? endpoint.name : ""; - + let newVal = selectedPath; if (prefix && !selectedPath.startsWith(prefix)) { - newVal = prefix + selectedPath; + newVal = prefix + selectedPath; } - + transfer_dlg_content.find("#path").val(newVal); } } else { From d2072ce5680dbbc1633c02d6ccd426ce10f9e631 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 14:08:14 -0500 Subject: [PATCH 43/65] change how endpoint id is restored --- web/static/main.js | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/web/static/main.js b/web/static/main.js index 0553f9ae9..6e6f85b7d 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -101,18 +101,32 @@ $(document).ready(function () { transfer_dlg_content.length && transfer_dlg_content.dialog("isOpen") ) { - const currentVal = transfer_dlg_content.find("#path").val(); - const prefix = - endpoint.name && currentVal.startsWith(endpoint.name) - ? 
endpoint.name - : ""; + const controller = transfer_dlg_content.data("controller"); + const epName = + endpoint.canonical_name || endpoint.name || endpoint.id; + + if (controller && epName) { + // Update controller state to match the browsed endpoint + if ( + !controller.endpointManager.state.currentEndpoint || + controller.endpointManager.state.currentEndpoint.id !== + endpoint.id + ) { + controller.endpointManager.state.currentEndpoint = { + ...endpoint, + name: epName, + }; + } + } + // Construct full path + // Browse callback returns path with leading slash usually let newVal = selectedPath; - if (prefix && !selectedPath.startsWith(prefix)) { - newVal = prefix + selectedPath; + if (epName && !selectedPath.startsWith(epName)) { + newVal = epName + selectedPath; } - transfer_dlg_content.find("#path").val(newVal); + transfer_dlg_content.find("#path").val(newVal).trigger("input"); } } else { console.log("Restored selection:", selectedPath); From ab08cecb2ac0fdcbf82a1b72e312c2d5f6133f4c Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 14:16:01 -0500 Subject: [PATCH 44/65] removed endpoint browser restore --- web/static/main.js | 52 ---------------------------------------------- 1 file changed, 52 deletions(-) diff --git a/web/static/main.js b/web/static/main.js index 6e6f85b7d..97450735f 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -82,58 +82,6 @@ $(document).ready(function () { }); } } - - if (tmpl_data.restore_state.endpoint_browser) { - import("/components/endpoint-browse/index.js").then((module) => { - const { endpoint, path, mode } = tmpl_data.restore_state.endpoint_browser; - module.show(endpoint, path, mode, (selectedPath) => { - // Update parent dialog if open - if (parentState?.type === "d_new_edit") { - const new_data_dlg = $("#d_new_edit"); - if (new_data_dlg.length && new_data_dlg.dialog("isOpen")) { - new_data_dlg.find("#source_file").val(selectedPath); - } - } else if (parentState?.type === "transfer") { - // Update 
path in transfer dialog - const transfer_dlg_content = - $("#records").closest(".ui-dialog-content"); - if ( - transfer_dlg_content.length && - transfer_dlg_content.dialog("isOpen") - ) { - const controller = transfer_dlg_content.data("controller"); - const epName = - endpoint.canonical_name || endpoint.name || endpoint.id; - - if (controller && epName) { - // Update controller state to match the browsed endpoint - if ( - !controller.endpointManager.state.currentEndpoint || - controller.endpointManager.state.currentEndpoint.id !== - endpoint.id - ) { - controller.endpointManager.state.currentEndpoint = { - ...endpoint, - name: epName, - }; - } - } - - // Construct full path - // Browse callback returns path with leading slash usually - let newVal = selectedPath; - if (epName && !selectedPath.startsWith(epName)) { - newVal = epName + selectedPath; - } - - transfer_dlg_content.find("#path").val(newVal).trigger("input"); - } - } else { - console.log("Restored selection:", selectedPath); - } - }); - }); - } } util.setStatusText("DataFed Ready"); From 1d12612126a0d000893ecc1f032a99c669147bad Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 14:36:15 -0500 Subject: [PATCH 45/65] remove debug info --- web/datafed-ws.js | 26 ++++---------------------- web/static/main.js | 1 - 2 files changed, 4 insertions(+), 23 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 5ce28870f..78290a462 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -689,7 +689,7 @@ the registration page. logger.info( "/ui/authn", getCurrentLineNumber(), - "DEBUG: Session Updated. UID: " + + "Session Updated. UID: " + username + ", CollectionID: " + a_req.session.collection_id, @@ -704,31 +704,13 @@ the registration page. 
collection_id: a_req.session.collection_id, scope: xfr_token.scope, }; - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "DEBUG: Token Context Constructed: " + - JSON.stringify(token_context), - ); try { const optional_data = token_handler.constructOptionalData(token_context); - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "DEBUG: Optional Data Constructed: " + - JSON.stringify(optional_data), - ); - // Refresh Globus access & refresh tokens to Core/DB // NOTE: core services seem entirely in charge of refreshing tokens once they are set (ClientWorker.cpp). // This should only be triggered when new tokens are coming in, like when a token expires or a transfer token is created. - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "DEBUG: Calling setAccessToken...", - ); setAccessToken( a_req.session.uid, xfr_token.access_token, @@ -741,14 +723,14 @@ the registration page. logger.error( "/ui/authn", getCurrentLineNumber(), - "DEBUG: setAccessToken Failed: " + err, + "setAccessToken Failed: " + err, ); delete a_req.session.collection_id; } else { logger.info( "/ui/authn", getCurrentLineNumber(), - "DEBUG: setAccessToken Success. Redirecting to " + + "setAccessToken Success. Redirecting to " + redirect_path, ); } @@ -761,7 +743,7 @@ the registration page. logger.error( "/ui/authn", getCurrentLineNumber(), - "DEBUG: Exception in token handling: " + err, + "Exception in token handling: " + err, ); delete a_req.session.collection_id; a_resp.redirect(redirect_path); diff --git a/web/static/main.js b/web/static/main.js index 97450735f..7a4e4dd49 100644 --- a/web/static/main.js +++ b/web/static/main.js @@ -77,7 +77,6 @@ $(document).ready(function () { module.transferDialog.show(mode, records, () => { // Default callback if needed, usually this refreshes view // but we might not have context. 
- console.log("Restored transfer dialog completed"); }); }); } From 0355e62acf7da7b2c09ea0b608908aa21519f722 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Fri, 23 Jan 2026 14:42:10 -0500 Subject: [PATCH 46/65] removed more debug info --- core/database/foxx/api/user_router.js | 25 ------------------------- web/datafed-ws.js | 21 --------------------- web/static/dlg_data_new_edit.js | 1 - 3 files changed, 47 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 4e0ced69a..440db93f3 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -903,30 +903,15 @@ router // GLOBUS_TRANSFER parse currently assumes uuid and scopes exist let globus_collection; - console.log( - "DEBUG: GLOBUS_TRANSFER - Starting token set. Collection Search Key:", - collection_search_key, - "Scopes:", - scopes, - ); - if ( g_db.globus_coll.exists({ _key: collection_search_key, }) ) { - console.log( - "DEBUG: Collection exists. Fetching document for key:", - collection_search_key, - ); globus_collection = g_db.globus_coll.document({ _key: collection_search_key, }); } else { - console.log( - "DEBUG: Collection does not exist. Creating new collection for key:", - collection_search_key, - ); const meta = g_db.globus_coll.save({ _key: collection_search_key, name: "Newly Inserted Collection", @@ -938,21 +923,14 @@ router type: "mapped", // mapped/guest TODO: to be pulled from token data on follow-up ticket ha_enabled: false, // boolean - TODO: to be pulled from token data on follow-up ticket }); - console.log("DEBUG: Collection created. 
Metadata:", meta); globus_collection = g_db.globus_coll.document(meta); } - console.log("DEBUG: Globus Collection Object:", globus_collection); - const token_key = globus_collection._key + "_" + token_type + "_" + user_doc._key; const dependent_scopes_val = scopes || globus_collection.required_scopes; - console.log( - "DEBUG: Calculated dependent_scopes:", - dependent_scopes_val, - ); const token_doc = { _key: token_key, @@ -969,12 +947,9 @@ router ...obj, }; - console.log("DEBUG: Saving Token Document:", token_doc); - const token_doc_upsert = g_db.globus_token.insert(token_doc, { overwriteMode: "replace", }); - console.log("DEBUG: Token Insert Result:", token_doc_upsert); break; } case g_lib.AccessTokenType.GLOBUS_DEFAULT: { diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 78290a462..9bf4a1640 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -670,11 +670,6 @@ the registration page. state_obj.restore_state ) { a_req.session.restore_state = state_obj; - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "Restorable state found and saved to session.", - ); } } catch (e) { // State was not JSON or valid, ignore @@ -686,15 +681,6 @@ the registration page. } } - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "Session Updated. UID: " + - username + - ", CollectionID: " + - a_req.session.collection_id, - ); - let redirect_path = "/ui/main"; // Note: context/optional params for arbitrary input @@ -726,13 +712,6 @@ the registration page. "setAccessToken Failed: " + err, ); delete a_req.session.collection_id; - } else { - logger.info( - "/ui/authn", - getCurrentLineNumber(), - "setAccessToken Success. 
Redirecting to " + - redirect_path, - ); } // TODO Account may be disable from SDMS (active = false) a_resp.redirect(redirect_path); diff --git a/web/static/dlg_data_new_edit.js b/web/static/dlg_data_new_edit.js index 36a2ac07f..ef38d5d32 100644 --- a/web/static/dlg_data_new_edit.js +++ b/web/static/dlg_data_new_edit.js @@ -147,7 +147,6 @@ export function show(a_mode, a_data, a_parent, a_upd_perms, a_cb) { } api.metadataValidate(sch_id, jsoned.getValue(), function (ok, data) { - //console.log("val res:", ok, data ); if (ok) { if (data.errors) { $("#md_err_msg", frame).text(data.errors).show(); From e03b420eb86b9b1369a4e2c6ca155cc60a601788 Mon Sep 17 00:00:00 2001 From: nedvedba <145805866+nedvedba@users.noreply.github.com> Date: Mon, 2 Feb 2026 04:10:28 -0500 Subject: [PATCH 47/65] Update datafed-ws.js fix jsdoc commend --- web/datafed-ws.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 9bf4a1640..e87216277 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -2098,6 +2098,7 @@ app.get("/ui/theme/save", (a_req, a_resp) => { * @param {string} a_ref_tok - Refresh token for access token * @param {number} a_expires_sec - Time until expiration of access token * @param {OptionalData} [token_optional_params] - Optional params for DataFed to process access token accordingly + * @param {RequestCallback} [a_cb] - Optional callback function * * @throws Error - When a reply is not received from sendMessageDirect */ From 7f34078b3ee24b27ccce988de1fe36de96acb7d2 Mon Sep 17 00:00:00 2001 From: nedvedba <145805866+nedvedba@users.noreply.github.com> Date: Mon, 2 Feb 2026 04:13:23 -0500 Subject: [PATCH 48/65] Add back finally block after finishing debugging --- web/datafed-ws.js | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index e87216277..d7968f9db 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -835,14 +835,6 @@ app.get("/api/usr/register", 
(a_req, a_resp) => { a_resp.status(500).send("Registration failed during token set"); return; } - // Remove data not needed for active session - delete a_req.session.name; - delete a_req.session.email; - delete a_req.session.uuids; - delete a_req.session.acc_tok; - delete a_req.session.acc_tok_ttl; - delete a_req.session.ref_tok; - delete a_req.session.uuids; // Set session as registered user a_req.session.reg = true; @@ -853,6 +845,15 @@ app.get("/api/usr/register", (a_req, a_resp) => { } catch (err) { logger.error("/api/usr/register", getCurrentLineNumber(), err); throw err; + } finally { + // Remove data not needed for active session + delete a_req.session.name; + delete a_req.session.email; + delete a_req.session.uuids; + delete a_req.session.acc_tok; + delete a_req.session.acc_tok_ttl; + delete a_req.session.ref_tok; + delete a_req.session.uuids; } } }, From d2d95962eec447869a95671e3c73cbb3d6fc8cf0 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Mon, 2 Feb 2026 14:58:28 -0500 Subject: [PATCH 49/65] [DAPS-1836] - feat: add protobuf3 replacement files. 
(#1836) --- common/proto3/common/CMakeLists.txt | 35 ++ common/proto3/common/anon/ack_reply.proto | 9 + .../common/anon/auth_status_reply.proto | 10 + .../authenticate_by_password_request.proto | 10 + .../anon/authenticate_by_token_request.proto | 9 + .../common/anon/daily_message_reply.proto | 9 + .../common/anon/daily_message_request.proto | 8 + .../common/anon/get_auth_status_request.proto | 8 + common/proto3/common/anon/nack_reply.proto | 13 + common/proto3/common/anon/version_reply.proto | 19 + .../proto3/common/anon/version_request.proto | 8 + .../proto3/common/auth/acl_data_reply.proto | 11 + .../auth/acl_shared_list_items_request.proto | 9 + .../common/auth/acl_shared_list_request.proto | 10 + .../common/auth/acl_update_request.proto | 10 + .../proto3/common/auth/acl_view_request.proto | 9 + .../common/auth/check_perms_reply.proto | 9 + .../common/auth/check_perms_request.proto | 10 + .../common/auth/coll_create_request.proto | 14 + .../proto3/common/auth/coll_data_reply.proto | 13 + .../common/auth/coll_delete_request.proto | 9 + .../common/auth/coll_get_offset_reply.proto | 11 + .../common/auth/coll_get_offset_request.proto | 11 + .../auth/coll_get_parents_request.proto | 10 + .../auth/coll_list_published_request.proto | 11 + .../common/auth/coll_move_request.proto | 11 + .../proto3/common/auth/coll_path_reply.proto | 11 + .../common/auth/coll_read_request.proto | 12 + .../common/auth/coll_update_request.proto | 15 + .../common/auth/coll_view_request.proto | 9 + .../common/auth/coll_write_request.proto | 12 + .../common/auth/data_delete_request.proto | 9 + .../proto3/common/auth/data_get_reply.proto | 13 + .../proto3/common/auth/data_get_request.proto | 17 + .../proto3/common/auth/data_path_reply.proto | 9 + .../common/auth/data_path_request.proto | 10 + .../proto3/common/auth/data_put_reply.proto | 13 + .../proto3/common/auth/data_put_request.proto | 17 + .../auth/generate_credentials_reply.proto | 10 + .../auth/generate_credentials_request.proto | 10 + 
.../proto3/common/auth/get_perms_reply.proto | 9 + .../common/auth/get_perms_request.proto | 10 + .../common/auth/group_create_request.proto | 11 + .../proto3/common/auth/group_data_reply.proto | 11 + .../common/auth/group_delete_request.proto | 10 + .../common/auth/group_list_request.proto | 9 + .../common/auth/group_update_request.proto | 14 + .../common/auth/group_view_request.proto | 10 + common/proto3/common/auth/listing_reply.proto | 14 + .../common/auth/metadata_validate_reply.proto | 9 + .../auth/metadata_validate_request.proto | 10 + .../auth/note_comment_edit_request.proto | 11 + .../common/auth/note_create_request.proto | 15 + .../proto3/common/auth/note_data_reply.proto | 13 + .../auth/note_list_by_subject_request.proto | 9 + .../common/auth/note_update_request.proto | 16 + .../common/auth/note_view_request.proto | 9 + .../common/auth/project_create_request.proto | 13 + .../common/auth/project_data_reply.proto | 11 + .../common/auth/project_delete_request.proto | 9 + .../common/auth/project_get_role_reply.proto | 11 + .../auth/project_get_role_request.proto | 10 + .../common/auth/project_list_request.proto | 18 + .../common/auth/project_search_request.proto | 10 + .../common/auth/project_update_request.proto | 15 + .../common/auth/project_view_request.proto | 9 + .../common/auth/query_create_request.proto | 12 + .../proto3/common/auth/query_data_reply.proto | 16 + .../common/auth/query_delete_request.proto | 9 + .../common/auth/query_exec_request.proto | 11 + .../common/auth/query_list_request.proto | 10 + .../common/auth/query_update_request.proto | 13 + .../common/auth/query_view_request.proto | 9 + .../auth/record_alloc_change_reply.proto | 18 + .../auth/record_alloc_change_request.proto | 12 + .../auth/record_create_batch_request.proto | 9 + .../common/auth/record_create_request.proto | 24 ++ .../common/auth/record_data_reply.proto | 13 + .../common/auth/record_delete_request.proto | 9 + .../common/auth/record_export_reply.proto | 9 + 
.../common/auth/record_export_request.proto | 9 + .../record_get_dependency_graph_request.proto | 9 + .../auth/record_list_by_alloc_request.proto | 12 + .../common/auth/record_lock_request.proto | 10 + .../auth/record_owner_change_reply.proto | 16 + .../auth/record_owner_change_request.proto | 13 + .../auth/record_update_batch_request.proto | 9 + .../common/auth/record_update_request.proto | 25 ++ .../common/auth/record_view_request.proto | 10 + .../auth/repo_allocation_create_request.proto | 12 + .../repo_allocation_create_response.proto | 15 + .../auth/repo_allocation_delete_request.proto | 10 + .../repo_allocation_set_default_request.proto | 10 + .../auth/repo_allocation_set_request.proto | 12 + .../auth/repo_allocation_stats_reply.proto | 11 + .../auth/repo_allocation_stats_request.proto | 10 + .../common/auth/repo_allocations_reply.proto | 11 + .../common/auth/repo_authz_request.proto | 12 + .../common/auth/repo_calc_size_reply.proto | 11 + .../common/auth/repo_calc_size_request.proto | 10 + .../common/auth/repo_create_request.proto | 20 + .../auth/repo_data_delete_request.proto | 11 + .../auth/repo_data_get_size_request.proto | 11 + .../proto3/common/auth/repo_data_reply.proto | 11 + .../common/auth/repo_data_size_reply.proto | 11 + .../common/auth/repo_delete_request.proto | 9 + .../auth/repo_list_allocations_request.proto | 9 + ...repo_list_object_allocations_request.proto | 9 + .../common/auth/repo_list_request.proto | 10 + ...epo_list_subject_allocations_request.proto | 10 + .../auth/repo_path_create_request.proto | 9 + .../auth/repo_path_delete_request.proto | 9 + .../common/auth/repo_update_request.proto | 20 + .../auth/repo_view_allocation_request.proto | 10 + .../common/auth/repo_view_request.proto | 9 + .../auth/revoke_credentials_request.proto | 8 + .../common/auth/schema_create_request.proto | 13 + .../common/auth/schema_data_reply.proto | 14 + .../common/auth/schema_delete_request.proto | 9 + .../common/auth/schema_revise_request.proto | 13 + 
.../common/auth/schema_search_request.proto | 17 + .../common/auth/schema_update_request.proto | 14 + .../common/auth/schema_view_request.proto | 10 + .../proto3/common/auth/search_request.proto | 29 ++ .../proto3/common/auth/tag_data_reply.proto | 14 + .../auth/tag_list_by_count_request.proto | 10 + .../common/auth/tag_search_request.proto | 11 + .../proto3/common/auth/task_data_reply.proto | 11 + .../common/auth/task_list_request.proto | 16 + .../common/auth/task_view_request.proto | 9 + .../proto3/common/auth/topic_data_reply.proto | 14 + .../auth/topic_list_topics_request.proto | 11 + .../common/auth/topic_search_request.proto | 9 + .../common/auth/topic_view_request.proto | 9 + .../common/auth/user_access_token_reply.proto | 11 + .../common/auth/user_create_request.proto | 15 + .../proto3/common/auth/user_data_reply.proto | 14 + .../auth/user_find_by_name_uid_request.proto | 11 + .../auth/user_find_by_uuids_request.proto | 9 + .../auth/user_get_access_token_request.proto | 10 + .../auth/user_get_recent_ep_reply.proto | 9 + .../auth/user_get_recent_ep_request.proto | 8 + .../common/auth/user_list_all_request.proto | 10 + .../auth/user_list_collab_request.proto | 10 + .../auth/user_set_access_token_request.proto | 15 + .../auth/user_set_recent_ep_request.proto | 9 + .../common/auth/user_update_request.proto | 12 + .../common/auth/user_view_request.proto | 10 + .../common/enums/access_token_type.proto | 14 + .../proto3/common/enums/dependency_dir.proto | 10 + .../proto3/common/enums/dependency_type.proto | 11 + common/proto3/common/enums/encryption.proto | 11 + common/proto3/common/enums/error_code.proto | 17 + .../common/enums/execution_method.proto | 11 + common/proto3/common/enums/note_state.proto | 11 + common/proto3/common/enums/note_type.proto | 12 + common/proto3/common/enums/project_role.proto | 12 + common/proto3/common/enums/search_mode.proto | 10 + .../proto3/common/enums/service_status.proto | 12 + common/proto3/common/enums/sort_option.proto | 14 + 
common/proto3/common/enums/task_command.proto | 14 + common/proto3/common/enums/task_status.proto | 13 + common/proto3/common/enums/task_type.proto | 18 + common/proto3/common/envelope.proto | 393 ++++++++++++++++++ common/proto3/common/messages/acl_rule.proto | 11 + .../proto3/common/messages/alloc_data.proto | 20 + .../common/messages/alloc_stats_data.proto | 13 + common/proto3/common/messages/coll_data.proto | 20 + .../common/messages/dependency_data.proto | 16 + .../messages/dependency_spec_data.proto | 12 + .../proto3/common/messages/group_data.proto | 13 + .../proto3/common/messages/listing_data.proto | 24 ++ .../proto3/common/messages/note_comment.proto | 16 + common/proto3/common/messages/note_data.proto | 22 + common/proto3/common/messages/path_data.proto | 11 + .../proto3/common/messages/project_data.proto | 19 + .../proto3/common/messages/record_data.proto | 34 ++ .../messages/record_data_location.proto | 10 + .../common/messages/record_data_size.proto | 10 + common/proto3/common/messages/repo_data.proto | 20 + .../messages/repo_record_data_locations.proto | 12 + .../proto3/common/messages/schema_data.proto | 20 + common/proto3/common/messages/tag_data.proto | 10 + common/proto3/common/messages/task_data.proto | 22 + .../proto3/common/messages/topic_data.proto | 15 + common/proto3/common/messages/user_data.proto | 19 + 186 files changed, 2684 insertions(+) create mode 100644 common/proto3/common/CMakeLists.txt create mode 100644 common/proto3/common/anon/ack_reply.proto create mode 100644 common/proto3/common/anon/auth_status_reply.proto create mode 100644 common/proto3/common/anon/authenticate_by_password_request.proto create mode 100644 common/proto3/common/anon/authenticate_by_token_request.proto create mode 100644 common/proto3/common/anon/daily_message_reply.proto create mode 100644 common/proto3/common/anon/daily_message_request.proto create mode 100644 common/proto3/common/anon/get_auth_status_request.proto create mode 100644 
common/proto3/common/anon/nack_reply.proto create mode 100644 common/proto3/common/anon/version_reply.proto create mode 100644 common/proto3/common/anon/version_request.proto create mode 100644 common/proto3/common/auth/acl_data_reply.proto create mode 100644 common/proto3/common/auth/acl_shared_list_items_request.proto create mode 100644 common/proto3/common/auth/acl_shared_list_request.proto create mode 100644 common/proto3/common/auth/acl_update_request.proto create mode 100644 common/proto3/common/auth/acl_view_request.proto create mode 100644 common/proto3/common/auth/check_perms_reply.proto create mode 100644 common/proto3/common/auth/check_perms_request.proto create mode 100644 common/proto3/common/auth/coll_create_request.proto create mode 100644 common/proto3/common/auth/coll_data_reply.proto create mode 100644 common/proto3/common/auth/coll_delete_request.proto create mode 100644 common/proto3/common/auth/coll_get_offset_reply.proto create mode 100644 common/proto3/common/auth/coll_get_offset_request.proto create mode 100644 common/proto3/common/auth/coll_get_parents_request.proto create mode 100644 common/proto3/common/auth/coll_list_published_request.proto create mode 100644 common/proto3/common/auth/coll_move_request.proto create mode 100644 common/proto3/common/auth/coll_path_reply.proto create mode 100644 common/proto3/common/auth/coll_read_request.proto create mode 100644 common/proto3/common/auth/coll_update_request.proto create mode 100644 common/proto3/common/auth/coll_view_request.proto create mode 100644 common/proto3/common/auth/coll_write_request.proto create mode 100644 common/proto3/common/auth/data_delete_request.proto create mode 100644 common/proto3/common/auth/data_get_reply.proto create mode 100644 common/proto3/common/auth/data_get_request.proto create mode 100644 common/proto3/common/auth/data_path_reply.proto create mode 100644 common/proto3/common/auth/data_path_request.proto create mode 100644 
common/proto3/common/auth/data_put_reply.proto create mode 100644 common/proto3/common/auth/data_put_request.proto create mode 100644 common/proto3/common/auth/generate_credentials_reply.proto create mode 100644 common/proto3/common/auth/generate_credentials_request.proto create mode 100644 common/proto3/common/auth/get_perms_reply.proto create mode 100644 common/proto3/common/auth/get_perms_request.proto create mode 100644 common/proto3/common/auth/group_create_request.proto create mode 100644 common/proto3/common/auth/group_data_reply.proto create mode 100644 common/proto3/common/auth/group_delete_request.proto create mode 100644 common/proto3/common/auth/group_list_request.proto create mode 100644 common/proto3/common/auth/group_update_request.proto create mode 100644 common/proto3/common/auth/group_view_request.proto create mode 100644 common/proto3/common/auth/listing_reply.proto create mode 100644 common/proto3/common/auth/metadata_validate_reply.proto create mode 100644 common/proto3/common/auth/metadata_validate_request.proto create mode 100644 common/proto3/common/auth/note_comment_edit_request.proto create mode 100644 common/proto3/common/auth/note_create_request.proto create mode 100644 common/proto3/common/auth/note_data_reply.proto create mode 100644 common/proto3/common/auth/note_list_by_subject_request.proto create mode 100644 common/proto3/common/auth/note_update_request.proto create mode 100644 common/proto3/common/auth/note_view_request.proto create mode 100644 common/proto3/common/auth/project_create_request.proto create mode 100644 common/proto3/common/auth/project_data_reply.proto create mode 100644 common/proto3/common/auth/project_delete_request.proto create mode 100644 common/proto3/common/auth/project_get_role_reply.proto create mode 100644 common/proto3/common/auth/project_get_role_request.proto create mode 100644 common/proto3/common/auth/project_list_request.proto create mode 100644 common/proto3/common/auth/project_search_request.proto 
create mode 100644 common/proto3/common/auth/project_update_request.proto create mode 100644 common/proto3/common/auth/project_view_request.proto create mode 100644 common/proto3/common/auth/query_create_request.proto create mode 100644 common/proto3/common/auth/query_data_reply.proto create mode 100644 common/proto3/common/auth/query_delete_request.proto create mode 100644 common/proto3/common/auth/query_exec_request.proto create mode 100644 common/proto3/common/auth/query_list_request.proto create mode 100644 common/proto3/common/auth/query_update_request.proto create mode 100644 common/proto3/common/auth/query_view_request.proto create mode 100644 common/proto3/common/auth/record_alloc_change_reply.proto create mode 100644 common/proto3/common/auth/record_alloc_change_request.proto create mode 100644 common/proto3/common/auth/record_create_batch_request.proto create mode 100644 common/proto3/common/auth/record_create_request.proto create mode 100644 common/proto3/common/auth/record_data_reply.proto create mode 100644 common/proto3/common/auth/record_delete_request.proto create mode 100644 common/proto3/common/auth/record_export_reply.proto create mode 100644 common/proto3/common/auth/record_export_request.proto create mode 100644 common/proto3/common/auth/record_get_dependency_graph_request.proto create mode 100644 common/proto3/common/auth/record_list_by_alloc_request.proto create mode 100644 common/proto3/common/auth/record_lock_request.proto create mode 100644 common/proto3/common/auth/record_owner_change_reply.proto create mode 100644 common/proto3/common/auth/record_owner_change_request.proto create mode 100644 common/proto3/common/auth/record_update_batch_request.proto create mode 100644 common/proto3/common/auth/record_update_request.proto create mode 100644 common/proto3/common/auth/record_view_request.proto create mode 100644 common/proto3/common/auth/repo_allocation_create_request.proto create mode 100644 
common/proto3/common/auth/repo_allocation_create_response.proto create mode 100644 common/proto3/common/auth/repo_allocation_delete_request.proto create mode 100644 common/proto3/common/auth/repo_allocation_set_default_request.proto create mode 100644 common/proto3/common/auth/repo_allocation_set_request.proto create mode 100644 common/proto3/common/auth/repo_allocation_stats_reply.proto create mode 100644 common/proto3/common/auth/repo_allocation_stats_request.proto create mode 100644 common/proto3/common/auth/repo_allocations_reply.proto create mode 100644 common/proto3/common/auth/repo_authz_request.proto create mode 100644 common/proto3/common/auth/repo_calc_size_reply.proto create mode 100644 common/proto3/common/auth/repo_calc_size_request.proto create mode 100644 common/proto3/common/auth/repo_create_request.proto create mode 100644 common/proto3/common/auth/repo_data_delete_request.proto create mode 100644 common/proto3/common/auth/repo_data_get_size_request.proto create mode 100644 common/proto3/common/auth/repo_data_reply.proto create mode 100644 common/proto3/common/auth/repo_data_size_reply.proto create mode 100644 common/proto3/common/auth/repo_delete_request.proto create mode 100644 common/proto3/common/auth/repo_list_allocations_request.proto create mode 100644 common/proto3/common/auth/repo_list_object_allocations_request.proto create mode 100644 common/proto3/common/auth/repo_list_request.proto create mode 100644 common/proto3/common/auth/repo_list_subject_allocations_request.proto create mode 100644 common/proto3/common/auth/repo_path_create_request.proto create mode 100644 common/proto3/common/auth/repo_path_delete_request.proto create mode 100644 common/proto3/common/auth/repo_update_request.proto create mode 100644 common/proto3/common/auth/repo_view_allocation_request.proto create mode 100644 common/proto3/common/auth/repo_view_request.proto create mode 100644 common/proto3/common/auth/revoke_credentials_request.proto create mode 100644 
common/proto3/common/auth/schema_create_request.proto create mode 100644 common/proto3/common/auth/schema_data_reply.proto create mode 100644 common/proto3/common/auth/schema_delete_request.proto create mode 100644 common/proto3/common/auth/schema_revise_request.proto create mode 100644 common/proto3/common/auth/schema_search_request.proto create mode 100644 common/proto3/common/auth/schema_update_request.proto create mode 100644 common/proto3/common/auth/schema_view_request.proto create mode 100644 common/proto3/common/auth/search_request.proto create mode 100644 common/proto3/common/auth/tag_data_reply.proto create mode 100644 common/proto3/common/auth/tag_list_by_count_request.proto create mode 100644 common/proto3/common/auth/tag_search_request.proto create mode 100644 common/proto3/common/auth/task_data_reply.proto create mode 100644 common/proto3/common/auth/task_list_request.proto create mode 100644 common/proto3/common/auth/task_view_request.proto create mode 100644 common/proto3/common/auth/topic_data_reply.proto create mode 100644 common/proto3/common/auth/topic_list_topics_request.proto create mode 100644 common/proto3/common/auth/topic_search_request.proto create mode 100644 common/proto3/common/auth/topic_view_request.proto create mode 100644 common/proto3/common/auth/user_access_token_reply.proto create mode 100644 common/proto3/common/auth/user_create_request.proto create mode 100644 common/proto3/common/auth/user_data_reply.proto create mode 100644 common/proto3/common/auth/user_find_by_name_uid_request.proto create mode 100644 common/proto3/common/auth/user_find_by_uuids_request.proto create mode 100644 common/proto3/common/auth/user_get_access_token_request.proto create mode 100644 common/proto3/common/auth/user_get_recent_ep_reply.proto create mode 100644 common/proto3/common/auth/user_get_recent_ep_request.proto create mode 100644 common/proto3/common/auth/user_list_all_request.proto create mode 100644 
common/proto3/common/auth/user_list_collab_request.proto create mode 100644 common/proto3/common/auth/user_set_access_token_request.proto create mode 100644 common/proto3/common/auth/user_set_recent_ep_request.proto create mode 100644 common/proto3/common/auth/user_update_request.proto create mode 100644 common/proto3/common/auth/user_view_request.proto create mode 100644 common/proto3/common/enums/access_token_type.proto create mode 100644 common/proto3/common/enums/dependency_dir.proto create mode 100644 common/proto3/common/enums/dependency_type.proto create mode 100644 common/proto3/common/enums/encryption.proto create mode 100644 common/proto3/common/enums/error_code.proto create mode 100644 common/proto3/common/enums/execution_method.proto create mode 100644 common/proto3/common/enums/note_state.proto create mode 100644 common/proto3/common/enums/note_type.proto create mode 100644 common/proto3/common/enums/project_role.proto create mode 100644 common/proto3/common/enums/search_mode.proto create mode 100644 common/proto3/common/enums/service_status.proto create mode 100644 common/proto3/common/enums/sort_option.proto create mode 100644 common/proto3/common/enums/task_command.proto create mode 100644 common/proto3/common/enums/task_status.proto create mode 100644 common/proto3/common/enums/task_type.proto create mode 100644 common/proto3/common/envelope.proto create mode 100644 common/proto3/common/messages/acl_rule.proto create mode 100644 common/proto3/common/messages/alloc_data.proto create mode 100644 common/proto3/common/messages/alloc_stats_data.proto create mode 100644 common/proto3/common/messages/coll_data.proto create mode 100644 common/proto3/common/messages/dependency_data.proto create mode 100644 common/proto3/common/messages/dependency_spec_data.proto create mode 100644 common/proto3/common/messages/group_data.proto create mode 100644 common/proto3/common/messages/listing_data.proto create mode 100644 
common/proto3/common/messages/note_comment.proto create mode 100644 common/proto3/common/messages/note_data.proto create mode 100644 common/proto3/common/messages/path_data.proto create mode 100644 common/proto3/common/messages/project_data.proto create mode 100644 common/proto3/common/messages/record_data.proto create mode 100644 common/proto3/common/messages/record_data_location.proto create mode 100644 common/proto3/common/messages/record_data_size.proto create mode 100644 common/proto3/common/messages/repo_data.proto create mode 100644 common/proto3/common/messages/repo_record_data_locations.proto create mode 100644 common/proto3/common/messages/schema_data.proto create mode 100644 common/proto3/common/messages/tag_data.proto create mode 100644 common/proto3/common/messages/task_data.proto create mode 100644 common/proto3/common/messages/topic_data.proto create mode 100644 common/proto3/common/messages/user_data.proto diff --git a/common/proto3/common/CMakeLists.txt b/common/proto3/common/CMakeLists.txt new file mode 100644 index 000000000..367c0801c --- /dev/null +++ b/common/proto3/common/CMakeLists.txt @@ -0,0 +1,35 @@ +cmake_minimum_required(VERSION 3.17.0) + +# Generate C++ from protos +protobuf_generate( + LANGUAGE cpp + PROTOS ${ProtoFiles} + IMPORT_DIRS "${CMAKE_CURRENT_SOURCE_DIR}" + OUT_VAR protobuf-generated-files +) + +add_custom_target(protobuf-gen-target DEPENDS ${protobuf-generated-files}) + +# Create library (respects parent's BUILD_SHARED_LIBS setting) +if(BUILD_SHARED_LIBS) + add_library(datafed-protobuf SHARED ${protobuf-generated-files}) +else() + add_library(datafed-protobuf STATIC ${protobuf-generated-files}) +endif() + +add_dependencies(datafed-protobuf protobuf-gen-target) + +set_target_properties(datafed-protobuf PROPERTIES + POSITION_INDEPENDENT_CODE ON + SOVERSION ${DATAFED_COMMON_PROTOCOL_API_MAJOR} + VERSION ${DATAFED_COMMON_PROTOCOL_API_MAJOR}.${DATAFED_COMMON_PROTOCOL_API_MINOR}.${DATAFED_COMMON_PROTOCOL_API_PATCH} +) + 
+target_link_libraries(datafed-protobuf + PUBLIC protobuf::libprotobuf +) + +target_include_directories(datafed-protobuf + PUBLIC ${CMAKE_CURRENT_BINARY_DIR} # Where generated files go + INTERFACE ${PROJECT_BINARY_DIR}/common/proto3 +) diff --git a/common/proto3/common/anon/ack_reply.proto b/common/proto3/common/anon/ack_reply.proto new file mode 100644 index 000000000..1cc67cf67 --- /dev/null +++ b/common/proto3/common/anon/ack_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +// Simple positive acknowledgement +message AckReply { +} diff --git a/common/proto3/common/anon/auth_status_reply.proto b/common/proto3/common/anon/auth_status_reply.proto new file mode 100644 index 000000000..99e2e9742 --- /dev/null +++ b/common/proto3/common/anon/auth_status_reply.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message AuthStatusReply { + bool auth = 1; + string uid = 2; +} diff --git a/common/proto3/common/anon/authenticate_by_password_request.proto b/common/proto3/common/anon/authenticate_by_password_request.proto new file mode 100644 index 000000000..22c569c98 --- /dev/null +++ b/common/proto3/common/anon/authenticate_by_password_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message AuthenticateByPasswordRequest { + string uid = 1; + string password = 2; +} diff --git a/common/proto3/common/anon/authenticate_by_token_request.proto b/common/proto3/common/anon/authenticate_by_token_request.proto new file mode 100644 index 000000000..55af17724 --- /dev/null +++ b/common/proto3/common/anon/authenticate_by_token_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message AuthenticateByTokenRequest { + string token = 1; +} diff --git a/common/proto3/common/anon/daily_message_reply.proto b/common/proto3/common/anon/daily_message_reply.proto new file mode 100644 
index 000000000..8ae2ad11b --- /dev/null +++ b/common/proto3/common/anon/daily_message_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message DailyMessageReply { + string message = 1; +} diff --git a/common/proto3/common/anon/daily_message_request.proto b/common/proto3/common/anon/daily_message_request.proto new file mode 100644 index 000000000..3060a7a2a --- /dev/null +++ b/common/proto3/common/anon/daily_message_request.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message DailyMessageRequest { +} diff --git a/common/proto3/common/anon/get_auth_status_request.proto b/common/proto3/common/anon/get_auth_status_request.proto new file mode 100644 index 000000000..c573803a6 --- /dev/null +++ b/common/proto3/common/anon/get_auth_status_request.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GetAuthStatusRequest { +} diff --git a/common/proto3/common/anon/nack_reply.proto b/common/proto3/common/anon/nack_reply.proto new file mode 100644 index 000000000..b4a318466 --- /dev/null +++ b/common/proto3/common/anon/nack_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/error_code.proto"; + +option cc_enable_arenas = true; + +// Error response +message NackReply { + ErrorCode err_code = 1; + string err_msg = 2; +} diff --git a/common/proto3/common/anon/version_reply.proto b/common/proto3/common/anon/version_reply.proto new file mode 100644 index 000000000..d921394c5 --- /dev/null +++ b/common/proto3/common/anon/version_reply.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message VersionReply { + uint32 release_year = 1; + uint32 release_month = 2; + uint32 release_day = 3; + uint32 release_hour = 4; + uint32 release_minute = 5; + uint32 api_major = 6; + uint32 api_minor = 7; + uint32 api_patch = 8; + uint32 component_major = 9; + 
uint32 component_minor = 10; + uint32 component_patch = 11; +} diff --git a/common/proto3/common/anon/version_request.proto b/common/proto3/common/anon/version_request.proto new file mode 100644 index 000000000..f5843c779 --- /dev/null +++ b/common/proto3/common/anon/version_request.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message VersionRequest { +} diff --git a/common/proto3/common/auth/acl_data_reply.proto b/common/proto3/common/auth/acl_data_reply.proto new file mode 100644 index 000000000..8b07ff7cf --- /dev/null +++ b/common/proto3/common/auth/acl_data_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/acl_rule.proto"; + +option cc_enable_arenas = true; + +message ACLDataReply { + repeated ACLRule rule = 1; +} diff --git a/common/proto3/common/auth/acl_shared_list_items_request.proto b/common/proto3/common/auth/acl_shared_list_items_request.proto new file mode 100644 index 000000000..12a6198a0 --- /dev/null +++ b/common/proto3/common/auth/acl_shared_list_items_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ACLSharedListItemsRequest { + string owner = 2; +} diff --git a/common/proto3/common/auth/acl_shared_list_request.proto b/common/proto3/common/auth/acl_shared_list_request.proto new file mode 100644 index 000000000..e26889812 --- /dev/null +++ b/common/proto3/common/auth/acl_shared_list_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ACLSharedListRequest { + bool inc_users = 2; + bool inc_projects = 3; +} diff --git a/common/proto3/common/auth/acl_update_request.proto b/common/proto3/common/auth/acl_update_request.proto new file mode 100644 index 000000000..713cd455f --- /dev/null +++ b/common/proto3/common/auth/acl_update_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message 
ACLUpdateRequest { + string id = 1; + string rules = 2; +} diff --git a/common/proto3/common/auth/acl_view_request.proto b/common/proto3/common/auth/acl_view_request.proto new file mode 100644 index 000000000..6acb5e4bf --- /dev/null +++ b/common/proto3/common/auth/acl_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ACLViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/check_perms_reply.proto b/common/proto3/common/auth/check_perms_reply.proto new file mode 100644 index 000000000..1a1aa1f44 --- /dev/null +++ b/common/proto3/common/auth/check_perms_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CheckPermsReply { + bool granted = 1; +} diff --git a/common/proto3/common/auth/check_perms_request.proto b/common/proto3/common/auth/check_perms_request.proto new file mode 100644 index 000000000..731c47107 --- /dev/null +++ b/common/proto3/common/auth/check_perms_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CheckPermsRequest { + string id = 1; + uint32 perms = 2; +} diff --git a/common/proto3/common/auth/coll_create_request.proto b/common/proto3/common/auth/coll_create_request.proto new file mode 100644 index 000000000..e24d6fb98 --- /dev/null +++ b/common/proto3/common/auth/coll_create_request.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollCreateRequest { + string title = 1; + string desc = 2; + string alias = 3; + string parent_id = 4; + string topic = 6; + repeated string tags = 7; +} diff --git a/common/proto3/common/auth/coll_data_reply.proto b/common/proto3/common/auth/coll_data_reply.proto new file mode 100644 index 000000000..27592d05b --- /dev/null +++ b/common/proto3/common/auth/coll_data_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import 
"messages/coll_data.proto"; +import "messages/listing_data.proto"; + +option cc_enable_arenas = true; + +message CollDataReply { + repeated CollData coll = 1; + repeated ListingData update = 2; +} diff --git a/common/proto3/common/auth/coll_delete_request.proto b/common/proto3/common/auth/coll_delete_request.proto new file mode 100644 index 000000000..437626405 --- /dev/null +++ b/common/proto3/common/auth/coll_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollDeleteRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/coll_get_offset_reply.proto b/common/proto3/common/auth/coll_get_offset_reply.proto new file mode 100644 index 000000000..594037161 --- /dev/null +++ b/common/proto3/common/auth/coll_get_offset_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollGetOffsetReply { + string id = 1; + string item = 2; + uint32 offset = 3; +} diff --git a/common/proto3/common/auth/coll_get_offset_request.proto b/common/proto3/common/auth/coll_get_offset_request.proto new file mode 100644 index 000000000..e25538065 --- /dev/null +++ b/common/proto3/common/auth/coll_get_offset_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollGetOffsetRequest { + string id = 1; + string item = 2; + uint32 page_sz = 3; +} diff --git a/common/proto3/common/auth/coll_get_parents_request.proto b/common/proto3/common/auth/coll_get_parents_request.proto new file mode 100644 index 000000000..6a87d2d85 --- /dev/null +++ b/common/proto3/common/auth/coll_get_parents_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollGetParentsRequest { + string id = 1; + bool inclusive = 2; +} diff --git a/common/proto3/common/auth/coll_list_published_request.proto b/common/proto3/common/auth/coll_list_published_request.proto 
new file mode 100644 index 000000000..90c7ceced --- /dev/null +++ b/common/proto3/common/auth/coll_list_published_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollListPublishedRequest { + string subject = 1; + uint32 offset = 2; + uint32 count = 3; +} diff --git a/common/proto3/common/auth/coll_move_request.proto b/common/proto3/common/auth/coll_move_request.proto new file mode 100644 index 000000000..c4de4badd --- /dev/null +++ b/common/proto3/common/auth/coll_move_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollMoveRequest { + string src_id = 1; + string dst_id = 2; + repeated string item = 3; +} diff --git a/common/proto3/common/auth/coll_path_reply.proto b/common/proto3/common/auth/coll_path_reply.proto new file mode 100644 index 000000000..96c708dd8 --- /dev/null +++ b/common/proto3/common/auth/coll_path_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/path_data.proto"; + +option cc_enable_arenas = true; + +message CollPathReply { + repeated PathData path = 1; +} diff --git a/common/proto3/common/auth/coll_read_request.proto b/common/proto3/common/auth/coll_read_request.proto new file mode 100644 index 000000000..658233744 --- /dev/null +++ b/common/proto3/common/auth/coll_read_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollReadRequest { + string id = 1; + bool details = 3; + uint32 offset = 4; + uint32 count = 5; +} diff --git a/common/proto3/common/auth/coll_update_request.proto b/common/proto3/common/auth/coll_update_request.proto new file mode 100644 index 000000000..2e2c9bc39 --- /dev/null +++ b/common/proto3/common/auth/coll_update_request.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollUpdateRequest { + string id = 1; + string title = 2; + string 
desc = 3; + string alias = 4; + string topic = 6; + repeated string tags = 7; + bool tags_clear = 8; +} diff --git a/common/proto3/common/auth/coll_view_request.proto b/common/proto3/common/auth/coll_view_request.proto new file mode 100644 index 000000000..93493b8cb --- /dev/null +++ b/common/proto3/common/auth/coll_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/coll_write_request.proto b/common/proto3/common/auth/coll_write_request.proto new file mode 100644 index 000000000..e8fe8c93f --- /dev/null +++ b/common/proto3/common/auth/coll_write_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollWriteRequest { + string id = 1; + repeated string add = 2; + repeated string rem = 3; + bool rem_all = 4; +} diff --git a/common/proto3/common/auth/data_delete_request.proto b/common/proto3/common/auth/data_delete_request.proto new file mode 100644 index 000000000..073051292 --- /dev/null +++ b/common/proto3/common/auth/data_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message DataDeleteRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/data_get_reply.proto b/common/proto3/common/auth/data_get_reply.proto new file mode 100644 index 000000000..c2f574971 --- /dev/null +++ b/common/proto3/common/auth/data_get_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/listing_data.proto"; +import "messages/task_data.proto"; + +option cc_enable_arenas = true; + +message DataGetReply { + repeated ListingData item = 1; + TaskData task = 2; +} diff --git a/common/proto3/common/auth/data_get_request.proto b/common/proto3/common/auth/data_get_request.proto new file mode 100644 index 000000000..8c582d0b4 --- /dev/null +++ 
b/common/proto3/common/auth/data_get_request.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/encryption.proto"; + +option cc_enable_arenas = true; + +message DataGetRequest { + repeated string id = 1; + string path = 2; + Encryption encrypt = 3; + bool orig_fname = 4; + bool check = 5; + string collection_id = 6; + string collection_type = 7; +} diff --git a/common/proto3/common/auth/data_path_reply.proto b/common/proto3/common/auth/data_path_reply.proto new file mode 100644 index 000000000..21764fb38 --- /dev/null +++ b/common/proto3/common/auth/data_path_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message DataPathReply { + string path = 1; +} diff --git a/common/proto3/common/auth/data_path_request.proto b/common/proto3/common/auth/data_path_request.proto new file mode 100644 index 000000000..d0533abaa --- /dev/null +++ b/common/proto3/common/auth/data_path_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message DataPathRequest { + string id = 1; + string domain = 2; +} diff --git a/common/proto3/common/auth/data_put_reply.proto b/common/proto3/common/auth/data_put_reply.proto new file mode 100644 index 000000000..f5bcbbe21 --- /dev/null +++ b/common/proto3/common/auth/data_put_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data.proto"; +import "messages/task_data.proto"; + +option cc_enable_arenas = true; + +message DataPutReply { + RecordData item = 1; + TaskData task = 2; +} diff --git a/common/proto3/common/auth/data_put_request.proto b/common/proto3/common/auth/data_put_request.proto new file mode 100644 index 000000000..bde29f1c5 --- /dev/null +++ b/common/proto3/common/auth/data_put_request.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/encryption.proto"; + +option cc_enable_arenas = true; + +message DataPutRequest { + string id = 1; + 
string path = 2; + Encryption encrypt = 3; + string ext = 4; + bool check = 5; + string collection_id = 6; + string collection_type = 7; +} diff --git a/common/proto3/common/auth/generate_credentials_reply.proto b/common/proto3/common/auth/generate_credentials_reply.proto new file mode 100644 index 000000000..f50d5204b --- /dev/null +++ b/common/proto3/common/auth/generate_credentials_reply.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GenerateCredentialsReply { + string pub_key = 1; + string priv_key = 2; +} diff --git a/common/proto3/common/auth/generate_credentials_request.proto b/common/proto3/common/auth/generate_credentials_request.proto new file mode 100644 index 000000000..774cd354a --- /dev/null +++ b/common/proto3/common/auth/generate_credentials_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GenerateCredentialsRequest { + string domain = 1; + uint32 uid = 2; +} diff --git a/common/proto3/common/auth/get_perms_reply.proto b/common/proto3/common/auth/get_perms_reply.proto new file mode 100644 index 000000000..dd5e4b8c6 --- /dev/null +++ b/common/proto3/common/auth/get_perms_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GetPermsReply { + uint32 granted = 1; +} diff --git a/common/proto3/common/auth/get_perms_request.proto b/common/proto3/common/auth/get_perms_request.proto new file mode 100644 index 000000000..23d6da93a --- /dev/null +++ b/common/proto3/common/auth/get_perms_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GetPermsRequest { + string id = 1; + uint32 perms = 2; +} diff --git a/common/proto3/common/auth/group_create_request.proto b/common/proto3/common/auth/group_create_request.proto new file mode 100644 index 000000000..d97434720 --- /dev/null +++ 
b/common/proto3/common/auth/group_create_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/group_data.proto"; + +option cc_enable_arenas = true; + +message GroupCreateRequest { + GroupData group = 1; +} diff --git a/common/proto3/common/auth/group_data_reply.proto b/common/proto3/common/auth/group_data_reply.proto new file mode 100644 index 000000000..7f2416e50 --- /dev/null +++ b/common/proto3/common/auth/group_data_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/group_data.proto"; + +option cc_enable_arenas = true; + +message GroupDataReply { + repeated GroupData group = 1; +} diff --git a/common/proto3/common/auth/group_delete_request.proto b/common/proto3/common/auth/group_delete_request.proto new file mode 100644 index 000000000..1cd8d3c86 --- /dev/null +++ b/common/proto3/common/auth/group_delete_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GroupDeleteRequest { + string uid = 1; + string gid = 2; +} diff --git a/common/proto3/common/auth/group_list_request.proto b/common/proto3/common/auth/group_list_request.proto new file mode 100644 index 000000000..453dde364 --- /dev/null +++ b/common/proto3/common/auth/group_list_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GroupListRequest { + string uid = 1; +} diff --git a/common/proto3/common/auth/group_update_request.proto b/common/proto3/common/auth/group_update_request.proto new file mode 100644 index 000000000..f6a701134 --- /dev/null +++ b/common/proto3/common/auth/group_update_request.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GroupUpdateRequest { + string uid = 1; + string gid = 2; + string title = 3; + string desc = 4; + repeated string add_uid = 5; + repeated string rem_uid = 6; +} diff --git 
a/common/proto3/common/auth/group_view_request.proto b/common/proto3/common/auth/group_view_request.proto new file mode 100644 index 000000000..b7480ea61 --- /dev/null +++ b/common/proto3/common/auth/group_view_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GroupViewRequest { + string uid = 1; + string gid = 2; +} diff --git a/common/proto3/common/auth/listing_reply.proto b/common/proto3/common/auth/listing_reply.proto new file mode 100644 index 000000000..c45e4dcc6 --- /dev/null +++ b/common/proto3/common/auth/listing_reply.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/listing_data.proto"; + +option cc_enable_arenas = true; + +message ListingReply { + repeated ListingData item = 1; + uint32 offset = 2; + uint32 count = 3; + uint32 total = 4; +} diff --git a/common/proto3/common/auth/metadata_validate_reply.proto b/common/proto3/common/auth/metadata_validate_reply.proto new file mode 100644 index 000000000..a3ce92e42 --- /dev/null +++ b/common/proto3/common/auth/metadata_validate_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message MetadataValidateReply { + string errors = 1; +} diff --git a/common/proto3/common/auth/metadata_validate_request.proto b/common/proto3/common/auth/metadata_validate_request.proto new file mode 100644 index 000000000..2ccfcc9e4 --- /dev/null +++ b/common/proto3/common/auth/metadata_validate_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message MetadataValidateRequest { + string metadata = 1; + string sch_id = 2; +} diff --git a/common/proto3/common/auth/note_comment_edit_request.proto b/common/proto3/common/auth/note_comment_edit_request.proto new file mode 100644 index 000000000..cde74c660 --- /dev/null +++ b/common/proto3/common/auth/note_comment_edit_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + 
+option cc_enable_arenas = true; + +message NoteCommentEditRequest { + string id = 1; + uint32 comment_idx = 2; + string comment = 3; +} diff --git a/common/proto3/common/auth/note_create_request.proto b/common/proto3/common/auth/note_create_request.proto new file mode 100644 index 000000000..3e18e0ed7 --- /dev/null +++ b/common/proto3/common/auth/note_create_request.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/note_type.proto"; + +option cc_enable_arenas = true; + +message NoteCreateRequest { + NoteType type = 1; + string subject = 2; + string title = 3; + string comment = 4; + bool activate = 5; +} diff --git a/common/proto3/common/auth/note_data_reply.proto b/common/proto3/common/auth/note_data_reply.proto new file mode 100644 index 000000000..3e1c6b1ba --- /dev/null +++ b/common/proto3/common/auth/note_data_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/note_data.proto"; +import "messages/listing_data.proto"; + +option cc_enable_arenas = true; + +message NoteDataReply { + repeated NoteData note = 1; + repeated ListingData update = 2; +} diff --git a/common/proto3/common/auth/note_list_by_subject_request.proto b/common/proto3/common/auth/note_list_by_subject_request.proto new file mode 100644 index 000000000..2655e5b8f --- /dev/null +++ b/common/proto3/common/auth/note_list_by_subject_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message NoteListBySubjectRequest { + string subject = 1; +} diff --git a/common/proto3/common/auth/note_update_request.proto b/common/proto3/common/auth/note_update_request.proto new file mode 100644 index 000000000..21c7552fb --- /dev/null +++ b/common/proto3/common/auth/note_update_request.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/note_type.proto"; +import "enums/note_state.proto"; + +option cc_enable_arenas = true; + +message NoteUpdateRequest { + string id = 1; + string 
comment = 2; + NoteType new_type = 3; + NoteState new_state = 4; + string new_title = 5; +} diff --git a/common/proto3/common/auth/note_view_request.proto b/common/proto3/common/auth/note_view_request.proto new file mode 100644 index 000000000..1bfed1c3e --- /dev/null +++ b/common/proto3/common/auth/note_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message NoteViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/project_create_request.proto b/common/proto3/common/auth/project_create_request.proto new file mode 100644 index 000000000..7dc6f233d --- /dev/null +++ b/common/proto3/common/auth/project_create_request.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ProjectCreateRequest { + string id = 1; + string title = 2; + string desc = 3; + repeated string admin = 4; + repeated string member = 5; +} diff --git a/common/proto3/common/auth/project_data_reply.proto b/common/proto3/common/auth/project_data_reply.proto new file mode 100644 index 000000000..66d0ed0cc --- /dev/null +++ b/common/proto3/common/auth/project_data_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/project_data.proto"; + +option cc_enable_arenas = true; + +message ProjectDataReply { + repeated ProjectData proj = 1; +} diff --git a/common/proto3/common/auth/project_delete_request.proto b/common/proto3/common/auth/project_delete_request.proto new file mode 100644 index 000000000..faeb1d1f9 --- /dev/null +++ b/common/proto3/common/auth/project_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ProjectDeleteRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/project_get_role_reply.proto b/common/proto3/common/auth/project_get_role_reply.proto new file mode 100644 index 000000000..311871326 --- /dev/null +++ 
b/common/proto3/common/auth/project_get_role_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/project_role.proto"; + +option cc_enable_arenas = true; + +message ProjectGetRoleReply { + ProjectRole role = 1; +} diff --git a/common/proto3/common/auth/project_get_role_request.proto b/common/proto3/common/auth/project_get_role_request.proto new file mode 100644 index 000000000..c6d7f2abc --- /dev/null +++ b/common/proto3/common/auth/project_get_role_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ProjectGetRoleRequest { + string id = 1; + string subject = 2; +} diff --git a/common/proto3/common/auth/project_list_request.proto b/common/proto3/common/auth/project_list_request.proto new file mode 100644 index 000000000..a3598fd9b --- /dev/null +++ b/common/proto3/common/auth/project_list_request.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/sort_option.proto"; + +option cc_enable_arenas = true; + +message ProjectListRequest { + string subject = 1; + bool as_owner = 2; + bool as_admin = 3; + bool as_member = 4; + SortOption sort = 5; + bool sort_rev = 6; + uint32 offset = 7; + uint32 count = 8; +} diff --git a/common/proto3/common/auth/project_search_request.proto b/common/proto3/common/auth/project_search_request.proto new file mode 100644 index 000000000..4862a16ee --- /dev/null +++ b/common/proto3/common/auth/project_search_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ProjectSearchRequest { + string text_query = 1; + repeated string scope = 2; +} diff --git a/common/proto3/common/auth/project_update_request.proto b/common/proto3/common/auth/project_update_request.proto new file mode 100644 index 000000000..c7be4896a --- /dev/null +++ b/common/proto3/common/auth/project_update_request.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = 
true; + +message ProjectUpdateRequest { + string id = 1; + string title = 2; + string desc = 3; + bool admin_set = 4; + repeated string admin = 5; + bool member_set = 6; + repeated string member = 7; +} diff --git a/common/proto3/common/auth/project_view_request.proto b/common/proto3/common/auth/project_view_request.proto new file mode 100644 index 000000000..07432ec57 --- /dev/null +++ b/common/proto3/common/auth/project_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ProjectViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/query_create_request.proto b/common/proto3/common/auth/query_create_request.proto new file mode 100644 index 000000000..0b52c65e5 --- /dev/null +++ b/common/proto3/common/auth/query_create_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +import "auth/search_request.proto"; + +option cc_enable_arenas = true; + +message QueryCreateRequest { + string title = 1; + SearchRequest query = 2; +} diff --git a/common/proto3/common/auth/query_data_reply.proto b/common/proto3/common/auth/query_data_reply.proto new file mode 100644 index 000000000..371a631f9 --- /dev/null +++ b/common/proto3/common/auth/query_data_reply.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "auth/search_request.proto"; + +option cc_enable_arenas = true; + +message QueryDataReply { + string id = 1; + string title = 2; + string owner = 4; + uint32 ct = 5; + uint32 ut = 6; + SearchRequest query = 7; +} diff --git a/common/proto3/common/auth/query_delete_request.proto b/common/proto3/common/auth/query_delete_request.proto new file mode 100644 index 000000000..0bcada00d --- /dev/null +++ b/common/proto3/common/auth/query_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message QueryDeleteRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/query_exec_request.proto 
b/common/proto3/common/auth/query_exec_request.proto new file mode 100644 index 000000000..84c0fe3a2 --- /dev/null +++ b/common/proto3/common/auth/query_exec_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message QueryExecRequest { + string id = 1; + uint32 offset = 2; + uint32 count = 3; +} diff --git a/common/proto3/common/auth/query_list_request.proto b/common/proto3/common/auth/query_list_request.proto new file mode 100644 index 000000000..fc14850ed --- /dev/null +++ b/common/proto3/common/auth/query_list_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message QueryListRequest { + uint32 offset = 1; + uint32 count = 2; +} diff --git a/common/proto3/common/auth/query_update_request.proto b/common/proto3/common/auth/query_update_request.proto new file mode 100644 index 000000000..54d3f584a --- /dev/null +++ b/common/proto3/common/auth/query_update_request.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "auth/search_request.proto"; + +option cc_enable_arenas = true; + +message QueryUpdateRequest { + string id = 1; + string title = 2; + SearchRequest query = 3; +} diff --git a/common/proto3/common/auth/query_view_request.proto b/common/proto3/common/auth/query_view_request.proto new file mode 100644 index 000000000..c4b4a7985 --- /dev/null +++ b/common/proto3/common/auth/query_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message QueryViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/record_alloc_change_reply.proto b/common/proto3/common/auth/record_alloc_change_reply.proto new file mode 100644 index 000000000..4863be29f --- /dev/null +++ b/common/proto3/common/auth/record_alloc_change_reply.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/task_data.proto"; + +option cc_enable_arenas = true; + +message 
RecordAllocChangeReply { + uint32 act_cnt = 1; + uint64 act_size = 2; + uint32 tot_cnt = 3; + uint64 data_limit = 4; + uint64 data_size = 5; + uint32 rec_limit = 6; + uint32 rec_count = 7; + TaskData task = 8; +} diff --git a/common/proto3/common/auth/record_alloc_change_request.proto b/common/proto3/common/auth/record_alloc_change_request.proto new file mode 100644 index 000000000..b0a96b6dc --- /dev/null +++ b/common/proto3/common/auth/record_alloc_change_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordAllocChangeRequest { + repeated string id = 1; + string repo_id = 2; + string proj_id = 3; + bool check = 4; +} diff --git a/common/proto3/common/auth/record_create_batch_request.proto b/common/proto3/common/auth/record_create_batch_request.proto new file mode 100644 index 000000000..8d244016e --- /dev/null +++ b/common/proto3/common/auth/record_create_batch_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordCreateBatchRequest { + string records = 1; // JSON array +} diff --git a/common/proto3/common/auth/record_create_request.proto b/common/proto3/common/auth/record_create_request.proto new file mode 100644 index 000000000..824e9ff2f --- /dev/null +++ b/common/proto3/common/auth/record_create_request.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/dependency_spec_data.proto"; + +option cc_enable_arenas = true; + +message RecordCreateRequest { + string title = 1; + string desc = 2; + repeated string tags = 3; + string alias = 4; + string metadata = 5; + string parent_id = 6; + bool external = 7; + string source = 8; + string repo_id = 9; + string ext = 10; + bool ext_auto = 11; + repeated DependencySpecData deps = 12; + string sch_id = 13; + bool sch_enforce = 14; +} diff --git a/common/proto3/common/auth/record_data_reply.proto b/common/proto3/common/auth/record_data_reply.proto new file mode 
100644 index 000000000..09132fd36 --- /dev/null +++ b/common/proto3/common/auth/record_data_reply.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data.proto"; +import "messages/listing_data.proto"; + +option cc_enable_arenas = true; + +message RecordDataReply { + repeated RecordData data = 1; + repeated ListingData update = 2; +} diff --git a/common/proto3/common/auth/record_delete_request.proto b/common/proto3/common/auth/record_delete_request.proto new file mode 100644 index 000000000..3cdc56839 --- /dev/null +++ b/common/proto3/common/auth/record_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordDeleteRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/record_export_reply.proto b/common/proto3/common/auth/record_export_reply.proto new file mode 100644 index 000000000..17f91703a --- /dev/null +++ b/common/proto3/common/auth/record_export_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordExportReply { + repeated string record = 1; +} diff --git a/common/proto3/common/auth/record_export_request.proto b/common/proto3/common/auth/record_export_request.proto new file mode 100644 index 000000000..53ab49213 --- /dev/null +++ b/common/proto3/common/auth/record_export_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordExportRequest { + repeated string id = 1; +} diff --git a/common/proto3/common/auth/record_get_dependency_graph_request.proto b/common/proto3/common/auth/record_get_dependency_graph_request.proto new file mode 100644 index 000000000..4b7ce1ac2 --- /dev/null +++ b/common/proto3/common/auth/record_get_dependency_graph_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordGetDependencyGraphRequest { + string id = 1; +} diff 
--git a/common/proto3/common/auth/record_list_by_alloc_request.proto b/common/proto3/common/auth/record_list_by_alloc_request.proto new file mode 100644 index 000000000..68d6d2464 --- /dev/null +++ b/common/proto3/common/auth/record_list_by_alloc_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordListByAllocRequest { + string repo = 1; + string subject = 2; + uint32 offset = 3; + uint32 count = 4; +} diff --git a/common/proto3/common/auth/record_lock_request.proto b/common/proto3/common/auth/record_lock_request.proto new file mode 100644 index 000000000..b6513fe18 --- /dev/null +++ b/common/proto3/common/auth/record_lock_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordLockRequest { + repeated string id = 1; + bool lock = 2; +} diff --git a/common/proto3/common/auth/record_owner_change_reply.proto b/common/proto3/common/auth/record_owner_change_reply.proto new file mode 100644 index 000000000..f6598457f --- /dev/null +++ b/common/proto3/common/auth/record_owner_change_reply.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_data.proto"; +import "messages/task_data.proto"; + +option cc_enable_arenas = true; + +message RecordOwnerChangeReply { + uint32 act_cnt = 1; + uint64 act_size = 2; + uint32 tot_cnt = 3; + repeated AllocData alloc = 4; + TaskData task = 8; +} diff --git a/common/proto3/common/auth/record_owner_change_request.proto b/common/proto3/common/auth/record_owner_change_request.proto new file mode 100644 index 000000000..a0b03137a --- /dev/null +++ b/common/proto3/common/auth/record_owner_change_request.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordOwnerChangeRequest { + repeated string id = 1; + string coll_id = 2; + string repo_id = 3; + string proj_id = 4; + bool check = 5; +} diff --git 
a/common/proto3/common/auth/record_update_batch_request.proto b/common/proto3/common/auth/record_update_batch_request.proto new file mode 100644 index 000000000..1653e5cc1 --- /dev/null +++ b/common/proto3/common/auth/record_update_batch_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordUpdateBatchRequest { + string records = 1; // JSON array +} diff --git a/common/proto3/common/auth/record_update_request.proto b/common/proto3/common/auth/record_update_request.proto new file mode 100644 index 000000000..b4dfedf29 --- /dev/null +++ b/common/proto3/common/auth/record_update_request.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/dependency_spec_data.proto"; + +option cc_enable_arenas = true; + +message RecordUpdateRequest { + string id = 1; + string title = 2; + string desc = 3; + repeated string tags = 4; + bool tags_clear = 5; + string alias = 6; + string metadata = 7; + bool mdset = 8; + string sch_id = 9; + bool sch_enforce = 10; + string source = 11; + string ext = 12; + bool ext_auto = 13; + repeated DependencySpecData dep_add = 16; + repeated DependencySpecData dep_rem = 17; +} diff --git a/common/proto3/common/auth/record_view_request.proto b/common/proto3/common/auth/record_view_request.proto new file mode 100644 index 000000000..fa6344fbb --- /dev/null +++ b/common/proto3/common/auth/record_view_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordViewRequest { + string id = 1; + bool details = 2; +} diff --git a/common/proto3/common/auth/repo_allocation_create_request.proto b/common/proto3/common/auth/repo_allocation_create_request.proto new file mode 100644 index 000000000..876f87f7f --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_create_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoAllocationCreateRequest 
{ + string repo = 1; + string subject = 2; + uint64 data_limit = 3; + uint32 rec_limit = 4; +} diff --git a/common/proto3/common/auth/repo_allocation_create_response.proto b/common/proto3/common/auth/repo_allocation_create_response.proto new file mode 100644 index 000000000..cdd56a18a --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_create_response.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/execution_method.proto"; +import "messages/task_data.proto"; +import "messages/alloc_data.proto"; + +option cc_enable_arenas = true; + +message RepoAllocationCreateResponse { + ExecutionMethod execution_method = 1; + TaskData task = 2; + AllocData result = 3; +} diff --git a/common/proto3/common/auth/repo_allocation_delete_request.proto b/common/proto3/common/auth/repo_allocation_delete_request.proto new file mode 100644 index 000000000..454842e4c --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_delete_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoAllocationDeleteRequest { + string repo = 1; + string subject = 2; +} diff --git a/common/proto3/common/auth/repo_allocation_set_default_request.proto b/common/proto3/common/auth/repo_allocation_set_default_request.proto new file mode 100644 index 000000000..2947fdc43 --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_set_default_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoAllocationSetDefaultRequest { + string repo = 1; + string subject = 2; +} diff --git a/common/proto3/common/auth/repo_allocation_set_request.proto b/common/proto3/common/auth/repo_allocation_set_request.proto new file mode 100644 index 000000000..2a2e883d4 --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_set_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message 
RepoAllocationSetRequest { + string repo = 1; + string subject = 2; + uint64 data_limit = 3; + uint32 rec_limit = 4; +} diff --git a/common/proto3/common/auth/repo_allocation_stats_reply.proto b/common/proto3/common/auth/repo_allocation_stats_reply.proto new file mode 100644 index 000000000..068883291 --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_stats_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_stats_data.proto"; + +option cc_enable_arenas = true; + +message RepoAllocationStatsReply { + AllocStatsData alloc = 1; +} diff --git a/common/proto3/common/auth/repo_allocation_stats_request.proto b/common/proto3/common/auth/repo_allocation_stats_request.proto new file mode 100644 index 000000000..d76e3e651 --- /dev/null +++ b/common/proto3/common/auth/repo_allocation_stats_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoAllocationStatsRequest { + string repo = 1; + string subject = 2; +} diff --git a/common/proto3/common/auth/repo_allocations_reply.proto b/common/proto3/common/auth/repo_allocations_reply.proto new file mode 100644 index 000000000..fa8a28931 --- /dev/null +++ b/common/proto3/common/auth/repo_allocations_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_data.proto"; + +option cc_enable_arenas = true; + +message RepoAllocationsReply { + repeated AllocData alloc = 1; +} diff --git a/common/proto3/common/auth/repo_authz_request.proto b/common/proto3/common/auth/repo_authz_request.proto new file mode 100644 index 000000000..2c077e588 --- /dev/null +++ b/common/proto3/common/auth/repo_authz_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoAuthzRequest { + string repo = 1; + string client = 2; + string file = 3; + string action = 4; +} diff --git a/common/proto3/common/auth/repo_calc_size_reply.proto 
b/common/proto3/common/auth/repo_calc_size_reply.proto new file mode 100644 index 000000000..47abbdb87 --- /dev/null +++ b/common/proto3/common/auth/repo_calc_size_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_stats_data.proto"; + +option cc_enable_arenas = true; + +message RepoCalcSizeReply { + repeated AllocStatsData stats = 1; +} diff --git a/common/proto3/common/auth/repo_calc_size_request.proto b/common/proto3/common/auth/repo_calc_size_request.proto new file mode 100644 index 000000000..69ae02b40 --- /dev/null +++ b/common/proto3/common/auth/repo_calc_size_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoCalcSizeRequest { + bool recurse = 1; + repeated string item = 2; +} diff --git a/common/proto3/common/auth/repo_create_request.proto b/common/proto3/common/auth/repo_create_request.proto new file mode 100644 index 000000000..e4df95856 --- /dev/null +++ b/common/proto3/common/auth/repo_create_request.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoCreateRequest { + string id = 1; + string title = 2; + string desc = 3; + string domain = 5; + string path = 6; + string exp_path = 7; + string address = 8; + string endpoint = 9; + string pub_key = 10; + uint64 capacity = 11; + repeated string admin = 12; + string type = 13; +} diff --git a/common/proto3/common/auth/repo_data_delete_request.proto b/common/proto3/common/auth/repo_data_delete_request.proto new file mode 100644 index 000000000..5a9c5e962 --- /dev/null +++ b/common/proto3/common/auth/repo_data_delete_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data_location.proto"; + +option cc_enable_arenas = true; + +message RepoDataDeleteRequest { + repeated RecordDataLocation loc = 1; +} diff --git a/common/proto3/common/auth/repo_data_get_size_request.proto 
b/common/proto3/common/auth/repo_data_get_size_request.proto new file mode 100644 index 000000000..ff1333b86 --- /dev/null +++ b/common/proto3/common/auth/repo_data_get_size_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data_location.proto"; + +option cc_enable_arenas = true; + +message RepoDataGetSizeRequest { + repeated RecordDataLocation loc = 1; +} diff --git a/common/proto3/common/auth/repo_data_reply.proto b/common/proto3/common/auth/repo_data_reply.proto new file mode 100644 index 000000000..f4b2875be --- /dev/null +++ b/common/proto3/common/auth/repo_data_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/repo_data.proto"; + +option cc_enable_arenas = true; + +message RepoDataReply { + repeated RepoData repo = 1; +} diff --git a/common/proto3/common/auth/repo_data_size_reply.proto b/common/proto3/common/auth/repo_data_size_reply.proto new file mode 100644 index 000000000..87d4ea370 --- /dev/null +++ b/common/proto3/common/auth/repo_data_size_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data_size.proto"; + +option cc_enable_arenas = true; + +message RepoDataSizeReply { + repeated RecordDataSize size = 1; +} diff --git a/common/proto3/common/auth/repo_delete_request.proto b/common/proto3/common/auth/repo_delete_request.proto new file mode 100644 index 000000000..e83e83c83 --- /dev/null +++ b/common/proto3/common/auth/repo_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoDeleteRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/repo_list_allocations_request.proto b/common/proto3/common/auth/repo_list_allocations_request.proto new file mode 100644 index 000000000..304052578 --- /dev/null +++ b/common/proto3/common/auth/repo_list_allocations_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; 
+ +message RepoListAllocationsRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/repo_list_object_allocations_request.proto b/common/proto3/common/auth/repo_list_object_allocations_request.proto new file mode 100644 index 000000000..ec8ced43b --- /dev/null +++ b/common/proto3/common/auth/repo_list_object_allocations_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoListObjectAllocationsRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/repo_list_request.proto b/common/proto3/common/auth/repo_list_request.proto new file mode 100644 index 000000000..7e904a2bc --- /dev/null +++ b/common/proto3/common/auth/repo_list_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoListRequest { + bool details = 1; + bool all = 2; +} diff --git a/common/proto3/common/auth/repo_list_subject_allocations_request.proto b/common/proto3/common/auth/repo_list_subject_allocations_request.proto new file mode 100644 index 000000000..c5a3f8baf --- /dev/null +++ b/common/proto3/common/auth/repo_list_subject_allocations_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoListSubjectAllocationsRequest { + string subject = 1; + bool stats = 2; +} diff --git a/common/proto3/common/auth/repo_path_create_request.proto b/common/proto3/common/auth/repo_path_create_request.proto new file mode 100644 index 000000000..494122e4f --- /dev/null +++ b/common/proto3/common/auth/repo_path_create_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoPathCreateRequest { + string path = 1; +} diff --git a/common/proto3/common/auth/repo_path_delete_request.proto b/common/proto3/common/auth/repo_path_delete_request.proto new file mode 100644 index 000000000..5935c7612 --- /dev/null +++ 
b/common/proto3/common/auth/repo_path_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoPathDeleteRequest { + string path = 1; +} diff --git a/common/proto3/common/auth/repo_update_request.proto b/common/proto3/common/auth/repo_update_request.proto new file mode 100644 index 000000000..7218713cd --- /dev/null +++ b/common/proto3/common/auth/repo_update_request.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoUpdateRequest { + string id = 1; + string title = 2; + string desc = 3; + string domain = 5; + string path = 6; + string exp_path = 7; + string address = 8; + string endpoint = 9; + string pub_key = 10; + uint64 capacity = 11; + repeated string admin = 12; + string type = 13; +} diff --git a/common/proto3/common/auth/repo_view_allocation_request.proto b/common/proto3/common/auth/repo_view_allocation_request.proto new file mode 100644 index 000000000..30e244cba --- /dev/null +++ b/common/proto3/common/auth/repo_view_allocation_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoViewAllocationRequest { + string repo = 1; + string subject = 2; +} diff --git a/common/proto3/common/auth/repo_view_request.proto b/common/proto3/common/auth/repo_view_request.proto new file mode 100644 index 000000000..2e8bdd00b --- /dev/null +++ b/common/proto3/common/auth/repo_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RepoViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/revoke_credentials_request.proto b/common/proto3/common/auth/revoke_credentials_request.proto new file mode 100644 index 000000000..40dd4f3ba --- /dev/null +++ b/common/proto3/common/auth/revoke_credentials_request.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message 
RevokeCredentialsRequest { +} diff --git a/common/proto3/common/auth/schema_create_request.proto b/common/proto3/common/auth/schema_create_request.proto new file mode 100644 index 000000000..79def7813 --- /dev/null +++ b/common/proto3/common/auth/schema_create_request.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaCreateRequest { + string id = 1; + string desc = 2; + bool pub = 3; + bool sys = 4; + string def = 5; +} diff --git a/common/proto3/common/auth/schema_data_reply.proto b/common/proto3/common/auth/schema_data_reply.proto new file mode 100644 index 000000000..301de1feb --- /dev/null +++ b/common/proto3/common/auth/schema_data_reply.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/schema_data.proto"; + +option cc_enable_arenas = true; + +message SchemaDataReply { + repeated SchemaData schema = 1; + uint32 offset = 2; + uint32 count = 3; + uint32 total = 4; +} diff --git a/common/proto3/common/auth/schema_delete_request.proto b/common/proto3/common/auth/schema_delete_request.proto new file mode 100644 index 000000000..8d93645d6 --- /dev/null +++ b/common/proto3/common/auth/schema_delete_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaDeleteRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/schema_revise_request.proto b/common/proto3/common/auth/schema_revise_request.proto new file mode 100644 index 000000000..7eb565b7a --- /dev/null +++ b/common/proto3/common/auth/schema_revise_request.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaReviseRequest { + string id = 1; + string desc = 2; + bool pub = 3; + bool sys = 4; + string def = 5; +} diff --git a/common/proto3/common/auth/schema_search_request.proto b/common/proto3/common/auth/schema_search_request.proto new file mode 100644 index 000000000..a621e39a4 --- 
/dev/null +++ b/common/proto3/common/auth/schema_search_request.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/sort_option.proto"; + +option cc_enable_arenas = true; + +message SchemaSearchRequest { + string id = 1; + string text = 2; + string owner = 3; + SortOption sort = 4; + bool sort_rev = 5; + uint32 offset = 6; + uint32 count = 7; +} diff --git a/common/proto3/common/auth/schema_update_request.proto b/common/proto3/common/auth/schema_update_request.proto new file mode 100644 index 000000000..82bf40b5f --- /dev/null +++ b/common/proto3/common/auth/schema_update_request.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaUpdateRequest { + string id = 1; + string id_new = 2; + string desc = 3; + bool pub = 4; + bool sys = 5; + string def = 6; +} diff --git a/common/proto3/common/auth/schema_view_request.proto b/common/proto3/common/auth/schema_view_request.proto new file mode 100644 index 000000000..a6518eca2 --- /dev/null +++ b/common/proto3/common/auth/schema_view_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaViewRequest { + string id = 1; + bool resolve = 2; +} diff --git a/common/proto3/common/auth/search_request.proto b/common/proto3/common/auth/search_request.proto new file mode 100644 index 000000000..eaaa72c40 --- /dev/null +++ b/common/proto3/common/auth/search_request.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/search_mode.proto"; +import "enums/sort_option.proto"; + +option cc_enable_arenas = true; + +message SearchRequest { + SearchMode mode = 1; + bool published = 2; + string id = 3; + string text = 4; + repeated string tags = 5; + repeated string cat_tags = 6; + uint32 from = 7; + uint32 to = 8; + string owner = 9; + string creator = 10; + repeated string coll = 11; + string sch_id = 12; + string meta = 13; + bool meta_err = 14; + SortOption sort = 15; + 
bool sort_rev = 16; + uint32 offset = 17; + uint32 count = 18; +} diff --git a/common/proto3/common/auth/tag_data_reply.proto b/common/proto3/common/auth/tag_data_reply.proto new file mode 100644 index 000000000..d789311c2 --- /dev/null +++ b/common/proto3/common/auth/tag_data_reply.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/tag_data.proto"; + +option cc_enable_arenas = true; + +message TagDataReply { + repeated TagData tag = 1; + uint32 offset = 2; + uint32 count = 3; + uint32 total = 4; +} diff --git a/common/proto3/common/auth/tag_list_by_count_request.proto b/common/proto3/common/auth/tag_list_by_count_request.proto new file mode 100644 index 000000000..91a684a34 --- /dev/null +++ b/common/proto3/common/auth/tag_list_by_count_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TagListByCountRequest { + uint32 offset = 1; + uint32 count = 2; +} diff --git a/common/proto3/common/auth/tag_search_request.proto b/common/proto3/common/auth/tag_search_request.proto new file mode 100644 index 000000000..fbff65c4b --- /dev/null +++ b/common/proto3/common/auth/tag_search_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TagSearchRequest { + string name = 1; + uint32 offset = 2; + uint32 count = 3; +} diff --git a/common/proto3/common/auth/task_data_reply.proto b/common/proto3/common/auth/task_data_reply.proto new file mode 100644 index 000000000..543dd944f --- /dev/null +++ b/common/proto3/common/auth/task_data_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/task_data.proto"; + +option cc_enable_arenas = true; + +message TaskDataReply { + repeated TaskData task = 1; +} diff --git a/common/proto3/common/auth/task_list_request.proto b/common/proto3/common/auth/task_list_request.proto new file mode 100644 index 000000000..fbf399e05 --- /dev/null +++ 
b/common/proto3/common/auth/task_list_request.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/task_status.proto"; + +option cc_enable_arenas = true; + +message TaskListRequest { + uint32 since = 1; + uint32 from = 2; + uint32 to = 3; + repeated TaskStatus status = 4; + uint32 offset = 5; + uint32 count = 6; +} diff --git a/common/proto3/common/auth/task_view_request.proto b/common/proto3/common/auth/task_view_request.proto new file mode 100644 index 000000000..25553ce47 --- /dev/null +++ b/common/proto3/common/auth/task_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TaskViewRequest { + string task_id = 1; +} diff --git a/common/proto3/common/auth/topic_data_reply.proto b/common/proto3/common/auth/topic_data_reply.proto new file mode 100644 index 000000000..593b18ac6 --- /dev/null +++ b/common/proto3/common/auth/topic_data_reply.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/topic_data.proto"; + +option cc_enable_arenas = true; + +message TopicDataReply { + repeated TopicData topic = 1; + uint32 offset = 2; + uint32 count = 3; + uint32 total = 4; +} diff --git a/common/proto3/common/auth/topic_list_topics_request.proto b/common/proto3/common/auth/topic_list_topics_request.proto new file mode 100644 index 000000000..ddadce07a --- /dev/null +++ b/common/proto3/common/auth/topic_list_topics_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TopicListTopicsRequest { + string topic_id = 1; + uint32 offset = 2; + uint32 count = 3; +} diff --git a/common/proto3/common/auth/topic_search_request.proto b/common/proto3/common/auth/topic_search_request.proto new file mode 100644 index 000000000..90c269264 --- /dev/null +++ b/common/proto3/common/auth/topic_search_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message 
TopicSearchRequest { + string phrase = 1; +} diff --git a/common/proto3/common/auth/topic_view_request.proto b/common/proto3/common/auth/topic_view_request.proto new file mode 100644 index 000000000..5c16a624e --- /dev/null +++ b/common/proto3/common/auth/topic_view_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TopicViewRequest { + string id = 1; +} diff --git a/common/proto3/common/auth/user_access_token_reply.proto b/common/proto3/common/auth/user_access_token_reply.proto new file mode 100644 index 000000000..2d2c9f333 --- /dev/null +++ b/common/proto3/common/auth/user_access_token_reply.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserAccessTokenReply { + string access = 1; + uint32 expires_in = 2; + bool needs_consent = 3; +} diff --git a/common/proto3/common/auth/user_create_request.proto b/common/proto3/common/auth/user_create_request.proto new file mode 100644 index 000000000..8fe964982 --- /dev/null +++ b/common/proto3/common/auth/user_create_request.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserCreateRequest { + string uid = 1; + string password = 2; + string name = 3; + string email = 4; + repeated string uuid = 5; + string options = 6; + string secret = 7; +} diff --git a/common/proto3/common/auth/user_data_reply.proto b/common/proto3/common/auth/user_data_reply.proto new file mode 100644 index 000000000..4350bf024 --- /dev/null +++ b/common/proto3/common/auth/user_data_reply.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/user_data.proto"; + +option cc_enable_arenas = true; + +message UserDataReply { + repeated UserData user = 1; + uint32 offset = 2; + uint32 count = 3; + uint32 total = 4; +} diff --git a/common/proto3/common/auth/user_find_by_name_uid_request.proto b/common/proto3/common/auth/user_find_by_name_uid_request.proto new 
file mode 100644 index 000000000..1bccffee8 --- /dev/null +++ b/common/proto3/common/auth/user_find_by_name_uid_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserFindByNameUIDRequest { + string name_uid = 1; + uint32 offset = 2; + uint32 count = 3; +} diff --git a/common/proto3/common/auth/user_find_by_uuids_request.proto b/common/proto3/common/auth/user_find_by_uuids_request.proto new file mode 100644 index 000000000..5dbccdd10 --- /dev/null +++ b/common/proto3/common/auth/user_find_by_uuids_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserFindByUUIDsRequest { + repeated string uuid = 1; +} diff --git a/common/proto3/common/auth/user_get_access_token_request.proto b/common/proto3/common/auth/user_get_access_token_request.proto new file mode 100644 index 000000000..813bf255e --- /dev/null +++ b/common/proto3/common/auth/user_get_access_token_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserGetAccessTokenRequest { + string collection_id = 1; + string collection_type = 2; +} diff --git a/common/proto3/common/auth/user_get_recent_ep_reply.proto b/common/proto3/common/auth/user_get_recent_ep_reply.proto new file mode 100644 index 000000000..ed3609e84 --- /dev/null +++ b/common/proto3/common/auth/user_get_recent_ep_reply.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserGetRecentEPReply { + repeated string ep = 1; +} diff --git a/common/proto3/common/auth/user_get_recent_ep_request.proto b/common/proto3/common/auth/user_get_recent_ep_request.proto new file mode 100644 index 000000000..357385708 --- /dev/null +++ b/common/proto3/common/auth/user_get_recent_ep_request.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserGetRecentEPRequest { +} diff --git 
a/common/proto3/common/auth/user_list_all_request.proto b/common/proto3/common/auth/user_list_all_request.proto new file mode 100644 index 000000000..9e409044b --- /dev/null +++ b/common/proto3/common/auth/user_list_all_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserListAllRequest { + uint32 offset = 1; + uint32 count = 2; +} diff --git a/common/proto3/common/auth/user_list_collab_request.proto b/common/proto3/common/auth/user_list_collab_request.proto new file mode 100644 index 000000000..303571242 --- /dev/null +++ b/common/proto3/common/auth/user_list_collab_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserListCollabRequest { + uint32 offset = 1; + uint32 count = 2; +} diff --git a/common/proto3/common/auth/user_set_access_token_request.proto b/common/proto3/common/auth/user_set_access_token_request.proto new file mode 100644 index 000000000..c3e29cd4a --- /dev/null +++ b/common/proto3/common/auth/user_set_access_token_request.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/access_token_type.proto"; + +option cc_enable_arenas = true; + +message UserSetAccessTokenRequest { + string access = 1; + uint32 expires_in = 2; + string refresh = 3; + AccessTokenType type = 4; + string other = 5; +} diff --git a/common/proto3/common/auth/user_set_recent_ep_request.proto b/common/proto3/common/auth/user_set_recent_ep_request.proto new file mode 100644 index 000000000..839b734fa --- /dev/null +++ b/common/proto3/common/auth/user_set_recent_ep_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserSetRecentEPRequest { + repeated string ep = 1; +} diff --git a/common/proto3/common/auth/user_update_request.proto b/common/proto3/common/auth/user_update_request.proto new file mode 100644 index 000000000..bfb2db56b --- /dev/null +++ 
b/common/proto3/common/auth/user_update_request.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserUpdateRequest { + string uid = 1; + string email = 2; + string password = 3; + string options = 4; +} diff --git a/common/proto3/common/auth/user_view_request.proto b/common/proto3/common/auth/user_view_request.proto new file mode 100644 index 000000000..d404eaf1b --- /dev/null +++ b/common/proto3/common/auth/user_view_request.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message UserViewRequest { + string uid = 1; + bool details = 2; +} diff --git a/common/proto3/common/enums/access_token_type.proto b/common/proto3/common/enums/access_token_type.proto new file mode 100644 index 000000000..8b23ac985 --- /dev/null +++ b/common/proto3/common/enums/access_token_type.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum AccessTokenType { + ACCESS_TOKEN_TYPE_UNSPECIFIED = 0; + ACCESS_TOKEN_TYPE_GENERIC = 1; + ACCESS_TOKEN_TYPE_GLOBUS = 2; + ACCESS_TOKEN_TYPE_GLOBUS_AUTH = 3; + ACCESS_TOKEN_TYPE_GLOBUS_TRANSFER = 4; + ACCESS_TOKEN_TYPE_GLOBUS_DEFAULT = 5; +} diff --git a/common/proto3/common/enums/dependency_dir.proto b/common/proto3/common/enums/dependency_dir.proto new file mode 100644 index 000000000..1cd722921 --- /dev/null +++ b/common/proto3/common/enums/dependency_dir.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum DependencyDir { + DEPENDENCY_DIR_IN = 0; + DEPENDENCY_DIR_OUT = 1; +} diff --git a/common/proto3/common/enums/dependency_type.proto b/common/proto3/common/enums/dependency_type.proto new file mode 100644 index 000000000..795eeff79 --- /dev/null +++ b/common/proto3/common/enums/dependency_type.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum DependencyType { + DEPENDENCY_TYPE_IS_DERIVED_FROM = 
0; + DEPENDENCY_TYPE_IS_COMPONENT_OF = 1; + DEPENDENCY_TYPE_IS_NEW_VERSION_OF = 2; +} diff --git a/common/proto3/common/enums/encryption.proto b/common/proto3/common/enums/encryption.proto new file mode 100644 index 000000000..b843452f8 --- /dev/null +++ b/common/proto3/common/enums/encryption.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum Encryption { + ENCRYPTION_NONE = 0; + ENCRYPTION_AVAIL = 1; + ENCRYPTION_FORCE = 2; +} diff --git a/common/proto3/common/enums/error_code.proto b/common/proto3/common/enums/error_code.proto new file mode 100644 index 000000000..2d643629f --- /dev/null +++ b/common/proto3/common/enums/error_code.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum ErrorCode { + ERROR_CODE_UNSPECIFIED = 0; + ERROR_CODE_BAD_REQUEST = 1; + ERROR_CODE_INTERNAL_ERROR = 2; + ERROR_CODE_CLIENT_ERROR = 3; + ERROR_CODE_SERVICE_ERROR = 4; + ERROR_CODE_AUTHN_REQUIRED = 5; + ERROR_CODE_AUTHN_ERROR = 6; + ERROR_CODE_DEST_PATH_ERROR = 7; + ERROR_CODE_DEST_FILE_ERROR = 8; +} diff --git a/common/proto3/common/enums/execution_method.proto b/common/proto3/common/enums/execution_method.proto new file mode 100644 index 000000000..dff667350 --- /dev/null +++ b/common/proto3/common/enums/execution_method.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum ExecutionMethod { + EXECUTION_METHOD_UNSPECIFIED = 0; + EXECUTION_METHOD_DIRECT = 1; + EXECUTION_METHOD_DEFERRED = 2; +} diff --git a/common/proto3/common/enums/note_state.proto b/common/proto3/common/enums/note_state.proto new file mode 100644 index 000000000..d76bae2c1 --- /dev/null +++ b/common/proto3/common/enums/note_state.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum NoteState { + NOTE_STATE_CLOSED = 0; + NOTE_STATE_OPEN = 1; + NOTE_STATE_ACTIVE = 2; +} diff --git 
a/common/proto3/common/enums/note_type.proto b/common/proto3/common/enums/note_type.proto new file mode 100644 index 000000000..48f8e64fe --- /dev/null +++ b/common/proto3/common/enums/note_type.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum NoteType { + NOTE_TYPE_QUESTION = 0; + NOTE_TYPE_INFO = 1; + NOTE_TYPE_WARN = 2; + NOTE_TYPE_ERROR = 3; +} diff --git a/common/proto3/common/enums/project_role.proto b/common/proto3/common/enums/project_role.proto new file mode 100644 index 000000000..e13d94d35 --- /dev/null +++ b/common/proto3/common/enums/project_role.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum ProjectRole { + PROJECT_ROLE_NO_ROLE = 0; + PROJECT_ROLE_MEMBER = 1; + PROJECT_ROLE_MANAGER = 2; + PROJECT_ROLE_ADMIN = 3; +} diff --git a/common/proto3/common/enums/search_mode.proto b/common/proto3/common/enums/search_mode.proto new file mode 100644 index 000000000..cf6b1f42a --- /dev/null +++ b/common/proto3/common/enums/search_mode.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum SearchMode { + SEARCH_MODE_DATA = 0; + SEARCH_MODE_COLLECTION = 1; +} diff --git a/common/proto3/common/enums/service_status.proto b/common/proto3/common/enums/service_status.proto new file mode 100644 index 000000000..473d784a2 --- /dev/null +++ b/common/proto3/common/enums/service_status.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum ServiceStatus { + SERVICE_STATUS_NORMAL = 0; + SERVICE_STATUS_DEGRADED = 1; + SERVICE_STATUS_FAILED = 2; + SERVICE_STATUS_OFFLINE = 3; +} diff --git a/common/proto3/common/enums/sort_option.proto b/common/proto3/common/enums/sort_option.proto new file mode 100644 index 000000000..11acfb432 --- /dev/null +++ b/common/proto3/common/enums/sort_option.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = 
true; + +enum SortOption { + SORT_OPTION_ID = 0; + SORT_OPTION_TITLE = 1; + SORT_OPTION_OWNER = 2; + SORT_OPTION_TIME_CREATE = 3; + SORT_OPTION_TIME_UPDATE = 4; + SORT_OPTION_RELEVANCE = 5; +} diff --git a/common/proto3/common/enums/task_command.proto b/common/proto3/common/enums/task_command.proto new file mode 100644 index 000000000..fc4ba3f8e --- /dev/null +++ b/common/proto3/common/enums/task_command.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum TaskCommand { + TASK_COMMAND_STOP = 0; + TASK_COMMAND_RAW_DATA_TRANSFER = 1; + TASK_COMMAND_RAW_DATA_DELETE = 2; + TASK_COMMAND_RAW_DATA_UPDATE_SIZE = 3; + TASK_COMMAND_ALLOC_CREATE = 4; + TASK_COMMAND_ALLOC_DELETE = 5; +} diff --git a/common/proto3/common/enums/task_status.proto b/common/proto3/common/enums/task_status.proto new file mode 100644 index 000000000..25973da5a --- /dev/null +++ b/common/proto3/common/enums/task_status.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum TaskStatus { + TASK_STATUS_BLOCKED = 0; + TASK_STATUS_READY = 1; + TASK_STATUS_RUNNING = 2; + TASK_STATUS_SUCCEEDED = 3; + TASK_STATUS_FAILED = 4; +} diff --git a/common/proto3/common/enums/task_type.proto b/common/proto3/common/enums/task_type.proto new file mode 100644 index 000000000..b6819e403 --- /dev/null +++ b/common/proto3/common/enums/task_type.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +enum TaskType { + TASK_TYPE_DATA_GET = 0; + TASK_TYPE_DATA_PUT = 1; + TASK_TYPE_DATA_DEL = 2; + TASK_TYPE_REC_CHG_ALLOC = 3; + TASK_TYPE_REC_CHG_OWNER = 4; + TASK_TYPE_REC_DEL = 5; + TASK_TYPE_ALLOC_CREATE = 6; + TASK_TYPE_ALLOC_DEL = 7; + TASK_TYPE_USER_DEL = 8; + TASK_TYPE_PROJ_DEL = 9; +} diff --git a/common/proto3/common/envelope.proto b/common/proto3/common/envelope.proto new file mode 100644 index 000000000..20233f220 --- /dev/null +++ b/common/proto3/common/envelope.proto @@ -0,0 
+1,393 @@ +// Envelope message wrapping all DataFed protocol messages +// Use has_*() methods in C++ to check which message type is present +// +// Example C++ usage: +// sdms::Envelope env; +// if (env.has_version_request()) { +// // Handle version request +// } else if (env.has_record_create_request()) { +// // Handle record creation +// auto& req = env.record_create_request(); +// } + +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +// Anonymous messages +import "anon/ack_reply.proto"; +import "anon/nack_reply.proto"; +import "anon/version_request.proto"; +import "anon/version_reply.proto"; +import "anon/get_auth_status_request.proto"; +import "anon/authenticate_by_password_request.proto"; +import "anon/authenticate_by_token_request.proto"; +import "anon/auth_status_reply.proto"; +import "anon/daily_message_request.proto"; +import "anon/daily_message_reply.proto"; + +// Auth - Credentials +import "auth/generate_credentials_request.proto"; +import "auth/revoke_credentials_request.proto"; +import "auth/generate_credentials_reply.proto"; + +// Auth - Permissions +import "auth/check_perms_request.proto"; +import "auth/check_perms_reply.proto"; +import "auth/get_perms_request.proto"; +import "auth/get_perms_reply.proto"; + +// Auth - User +import "auth/user_view_request.proto"; +import "auth/user_data_reply.proto"; +import "auth/user_set_access_token_request.proto"; +import "auth/user_get_access_token_request.proto"; +import "auth/user_access_token_reply.proto"; +import "auth/user_create_request.proto"; +import "auth/user_find_by_uuids_request.proto"; +import "auth/user_find_by_name_uid_request.proto"; +import "auth/user_update_request.proto"; +import "auth/user_list_all_request.proto"; +import "auth/user_list_collab_request.proto"; +import "auth/user_get_recent_ep_request.proto"; +import "auth/user_get_recent_ep_reply.proto"; +import "auth/user_set_recent_ep_request.proto"; + +// Auth - General +import "auth/listing_reply.proto"; + +// Auth 
- Record +import "auth/record_list_by_alloc_request.proto"; +import "auth/record_view_request.proto"; +import "auth/record_data_reply.proto"; +import "auth/record_create_request.proto"; +import "auth/record_create_batch_request.proto"; +import "auth/record_update_request.proto"; +import "auth/record_update_batch_request.proto"; +import "auth/record_export_request.proto"; +import "auth/record_export_reply.proto"; +import "auth/record_lock_request.proto"; +import "auth/record_delete_request.proto"; +import "auth/record_get_dependency_graph_request.proto"; +import "auth/record_alloc_change_request.proto"; +import "auth/record_alloc_change_reply.proto"; +import "auth/record_owner_change_request.proto"; +import "auth/record_owner_change_reply.proto"; + +// Auth - Data +import "auth/data_get_request.proto"; +import "auth/data_put_request.proto"; +import "auth/data_get_reply.proto"; +import "auth/data_put_reply.proto"; +import "auth/data_delete_request.proto"; +import "auth/data_path_request.proto"; +import "auth/data_path_reply.proto"; + +// Auth - Search +import "auth/search_request.proto"; + +// Auth - Collection +import "auth/coll_view_request.proto"; +import "auth/coll_data_reply.proto"; +import "auth/coll_read_request.proto"; +import "auth/coll_create_request.proto"; +import "auth/coll_update_request.proto"; +import "auth/coll_delete_request.proto"; +import "auth/coll_write_request.proto"; +import "auth/coll_move_request.proto"; +import "auth/coll_get_parents_request.proto"; +import "auth/coll_path_reply.proto"; +import "auth/coll_get_offset_request.proto"; +import "auth/coll_get_offset_reply.proto"; +import "auth/coll_list_published_request.proto"; + +// Auth - Group +import "auth/group_create_request.proto"; +import "auth/group_update_request.proto"; +import "auth/group_data_reply.proto"; +import "auth/group_delete_request.proto"; +import "auth/group_list_request.proto"; +import "auth/group_view_request.proto"; + +// Auth - ACL +import 
"auth/acl_view_request.proto"; +import "auth/acl_update_request.proto"; +import "auth/acl_shared_list_request.proto"; +import "auth/acl_shared_list_items_request.proto"; +import "auth/acl_data_reply.proto"; + +// Auth - Project +import "auth/project_view_request.proto"; +import "auth/project_data_reply.proto"; +import "auth/project_create_request.proto"; +import "auth/project_update_request.proto"; +import "auth/project_delete_request.proto"; +import "auth/project_list_request.proto"; +import "auth/project_search_request.proto"; +import "auth/project_get_role_request.proto"; +import "auth/project_get_role_reply.proto"; + +// Auth - Repository +import "auth/repo_data_delete_request.proto"; +import "auth/repo_data_get_size_request.proto"; +import "auth/repo_data_size_reply.proto"; +import "auth/repo_path_create_request.proto"; +import "auth/repo_path_delete_request.proto"; +import "auth/repo_list_request.proto"; +import "auth/repo_view_request.proto"; +import "auth/repo_create_request.proto"; +import "auth/repo_update_request.proto"; +import "auth/repo_delete_request.proto"; +import "auth/repo_data_reply.proto"; +import "auth/repo_calc_size_request.proto"; +import "auth/repo_calc_size_reply.proto"; +import "auth/repo_list_allocations_request.proto"; +import "auth/repo_list_subject_allocations_request.proto"; +import "auth/repo_list_object_allocations_request.proto"; +import "auth/repo_view_allocation_request.proto"; +import "auth/repo_allocations_reply.proto"; +import "auth/repo_allocation_stats_request.proto"; +import "auth/repo_allocation_stats_reply.proto"; +import "auth/repo_allocation_create_request.proto"; +import "auth/repo_allocation_create_response.proto"; +import "auth/repo_allocation_set_request.proto"; +import "auth/repo_allocation_set_default_request.proto"; +import "auth/repo_allocation_delete_request.proto"; +import "auth/repo_authz_request.proto"; + +// Auth - Query +import "auth/query_create_request.proto"; +import "auth/query_update_request.proto"; 
+import "auth/query_delete_request.proto"; +import "auth/query_list_request.proto"; +import "auth/query_view_request.proto"; +import "auth/query_exec_request.proto"; +import "auth/query_data_reply.proto"; + +// Auth - Note +import "auth/note_list_by_subject_request.proto"; +import "auth/note_view_request.proto"; +import "auth/note_create_request.proto"; +import "auth/note_update_request.proto"; +import "auth/note_comment_edit_request.proto"; +import "auth/note_data_reply.proto"; + +// Auth - Task +import "auth/task_view_request.proto"; +import "auth/task_list_request.proto"; +import "auth/task_data_reply.proto"; + +// Auth - Tag +import "auth/tag_search_request.proto"; +import "auth/tag_list_by_count_request.proto"; +import "auth/tag_data_reply.proto"; + +// Auth - Schema +import "auth/metadata_validate_request.proto"; +import "auth/metadata_validate_reply.proto"; +import "auth/schema_view_request.proto"; +import "auth/schema_search_request.proto"; +import "auth/schema_data_reply.proto"; +import "auth/schema_create_request.proto"; +import "auth/schema_update_request.proto"; +import "auth/schema_revise_request.proto"; +import "auth/schema_delete_request.proto"; + +// Auth - Topic +import "auth/topic_list_topics_request.proto"; +import "auth/topic_view_request.proto"; +import "auth/topic_search_request.proto"; +import "auth/topic_data_reply.proto"; + +// Envelope message containing all possible request/reply types +// Each field is optional - use has_*() to check which is set +message Envelope { + oneof payload { + // Anonymous messages (10-29) + AckReply ack_reply = 10; + NackReply nack_reply = 11; + VersionRequest version_request = 12; + VersionReply version_reply = 13; + GetAuthStatusRequest get_auth_status_request = 14; + AuthenticateByPasswordRequest authenticate_by_password_request = 15; + AuthenticateByTokenRequest authenticate_by_token_request = 16; + AuthStatusReply auth_status_reply = 17; + DailyMessageRequest daily_message_request = 18; + DailyMessageReply 
daily_message_reply = 19; + + // Credentials (100-109) + GenerateCredentialsRequest generate_credentials_request = 100; + RevokeCredentialsRequest revoke_credentials_request = 101; + GenerateCredentialsReply generate_credentials_reply = 102; + + // Permissions (110-119) + CheckPermsRequest check_perms_request = 110; + CheckPermsReply check_perms_reply = 111; + GetPermsRequest get_perms_request = 112; + GetPermsReply get_perms_reply = 113; + + // User (120-149) + UserViewRequest user_view_request = 120; + UserDataReply user_data_reply = 121; + UserSetAccessTokenRequest user_set_access_token_request = 122; + UserGetAccessTokenRequest user_get_access_token_request = 123; + UserAccessTokenReply user_access_token_reply = 124; + UserCreateRequest user_create_request = 125; + UserFindByUUIDsRequest user_find_by_uuids_request = 126; + UserFindByNameUIDRequest user_find_by_name_uid_request = 127; + UserUpdateRequest user_update_request = 128; + UserListAllRequest user_list_all_request = 129; + UserListCollabRequest user_list_collab_request = 130; + UserGetRecentEPRequest user_get_recent_ep_request = 131; + UserGetRecentEPReply user_get_recent_ep_reply = 132; + UserSetRecentEPRequest user_set_recent_ep_request = 133; + + // General (150-159) + ListingReply listing_reply = 150; + + // Record (200-249) + RecordListByAllocRequest record_list_by_alloc_request = 200; + RecordViewRequest record_view_request = 201; + RecordDataReply record_data_reply = 202; + RecordCreateRequest record_create_request = 203; + RecordCreateBatchRequest record_create_batch_request = 204; + RecordUpdateRequest record_update_request = 205; + RecordUpdateBatchRequest record_update_batch_request = 206; + RecordExportRequest record_export_request = 207; + RecordExportReply record_export_reply = 208; + RecordLockRequest record_lock_request = 209; + RecordDeleteRequest record_delete_request = 210; + RecordGetDependencyGraphRequest record_get_dependency_graph_request = 211; + RecordAllocChangeRequest 
record_alloc_change_request = 212; + RecordAllocChangeReply record_alloc_change_reply = 213; + RecordOwnerChangeRequest record_owner_change_request = 214; + RecordOwnerChangeReply record_owner_change_reply = 215; + + // Data (250-269) + DataGetRequest data_get_request = 250; + DataPutRequest data_put_request = 251; + DataGetReply data_get_reply = 252; + DataPutReply data_put_reply = 253; + DataDeleteRequest data_delete_request = 254; + DataPathRequest data_path_request = 255; + DataPathReply data_path_reply = 256; + + // Search (270-279) + SearchRequest search_request = 270; + + // Collection (300-349) + CollViewRequest coll_view_request = 300; + CollDataReply coll_data_reply = 301; + CollReadRequest coll_read_request = 302; + CollCreateRequest coll_create_request = 303; + CollUpdateRequest coll_update_request = 304; + CollDeleteRequest coll_delete_request = 305; + CollWriteRequest coll_write_request = 306; + CollMoveRequest coll_move_request = 307; + CollGetParentsRequest coll_get_parents_request = 308; + CollPathReply coll_path_reply = 309; + CollGetOffsetRequest coll_get_offset_request = 310; + CollGetOffsetReply coll_get_offset_reply = 311; + CollListPublishedRequest coll_list_published_request = 312; + + // Group (350-369) + GroupCreateRequest group_create_request = 350; + GroupUpdateRequest group_update_request = 351; + GroupDataReply group_data_reply = 352; + GroupDeleteRequest group_delete_request = 353; + GroupListRequest group_list_request = 354; + GroupViewRequest group_view_request = 355; + + // ACL (370-389) + ACLViewRequest acl_view_request = 370; + ACLUpdateRequest acl_update_request = 371; + ACLSharedListRequest acl_shared_list_request = 372; + ACLSharedListItemsRequest acl_shared_list_items_request = 373; + ACLDataReply acl_data_reply = 374; + + // Project (400-429) + ProjectViewRequest project_view_request = 400; + ProjectDataReply project_data_reply = 401; + ProjectCreateRequest project_create_request = 402; + ProjectUpdateRequest 
project_update_request = 403; + ProjectDeleteRequest project_delete_request = 404; + ProjectListRequest project_list_request = 405; + ProjectSearchRequest project_search_request = 406; + ProjectGetRoleRequest project_get_role_request = 407; + ProjectGetRoleReply project_get_role_reply = 408; + + // Repository (450-519) + RepoDataDeleteRequest repo_data_delete_request = 450; + RepoDataGetSizeRequest repo_data_get_size_request = 451; + RepoDataSizeReply repo_data_size_reply = 452; + RepoPathCreateRequest repo_path_create_request = 453; + RepoPathDeleteRequest repo_path_delete_request = 454; + RepoListRequest repo_list_request = 455; + RepoViewRequest repo_view_request = 456; + RepoCreateRequest repo_create_request = 457; + RepoUpdateRequest repo_update_request = 458; + RepoDeleteRequest repo_delete_request = 459; + RepoDataReply repo_data_reply = 460; + RepoCalcSizeRequest repo_calc_size_request = 461; + RepoCalcSizeReply repo_calc_size_reply = 462; + RepoListAllocationsRequest repo_list_allocations_request = 463; + RepoListSubjectAllocationsRequest repo_list_subject_allocations_request = 464; + RepoListObjectAllocationsRequest repo_list_object_allocations_request = 465; + RepoViewAllocationRequest repo_view_allocation_request = 466; + RepoAllocationsReply repo_allocations_reply = 467; + RepoAllocationStatsRequest repo_allocation_stats_request = 468; + RepoAllocationStatsReply repo_allocation_stats_reply = 469; + RepoAllocationCreateRequest repo_allocation_create_request = 470; + RepoAllocationCreateResponse repo_allocation_create_response = 471; + RepoAllocationSetRequest repo_allocation_set_request = 472; + RepoAllocationSetDefaultRequest repo_allocation_set_default_request = 473; + RepoAllocationDeleteRequest repo_allocation_delete_request = 474; + RepoAuthzRequest repo_authz_request = 475; + + // Query (520-539) + QueryCreateRequest query_create_request = 520; + QueryUpdateRequest query_update_request = 521; + QueryDeleteRequest query_delete_request = 522; + 
QueryListRequest query_list_request = 523; + QueryViewRequest query_view_request = 524; + QueryExecRequest query_exec_request = 525; + QueryDataReply query_data_reply = 526; + + // Note (540-559) + NoteListBySubjectRequest note_list_by_subject_request = 540; + NoteViewRequest note_view_request = 541; + NoteCreateRequest note_create_request = 542; + NoteUpdateRequest note_update_request = 543; + NoteCommentEditRequest note_comment_edit_request = 544; + NoteDataReply note_data_reply = 545; + + // Task (560-579) + TaskViewRequest task_view_request = 560; + TaskListRequest task_list_request = 561; + TaskDataReply task_data_reply = 562; + + // Tag (580-599) + TagSearchRequest tag_search_request = 580; + TagListByCountRequest tag_list_by_count_request = 581; + TagDataReply tag_data_reply = 582; + + // Schema (600-629) + MetadataValidateRequest metadata_validate_request = 600; + MetadataValidateReply metadata_validate_reply = 601; + SchemaViewRequest schema_view_request = 602; + SchemaSearchRequest schema_search_request = 603; + SchemaDataReply schema_data_reply = 604; + SchemaCreateRequest schema_create_request = 605; + SchemaUpdateRequest schema_update_request = 606; + SchemaReviseRequest schema_revise_request = 607; + SchemaDeleteRequest schema_delete_request = 608; + + // Topic (630-649) + TopicListTopicsRequest topic_list_topics_request = 630; + TopicViewRequest topic_view_request = 631; + TopicSearchRequest topic_search_request = 632; + TopicDataReply topic_data_reply = 633; + } +} diff --git a/common/proto3/common/messages/acl_rule.proto b/common/proto3/common/messages/acl_rule.proto new file mode 100644 index 000000000..0c30977b1 --- /dev/null +++ b/common/proto3/common/messages/acl_rule.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message ACLRule { + string id = 1; + uint32 grant = 2; + uint32 inhgrant = 3; +} diff --git a/common/proto3/common/messages/alloc_data.proto 
b/common/proto3/common/messages/alloc_data.proto new file mode 100644 index 000000000..f57089501 --- /dev/null +++ b/common/proto3/common/messages/alloc_data.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_stats_data.proto"; + +option cc_enable_arenas = true; + +message AllocData { + string repo = 1; + uint64 data_limit = 2; + uint64 data_size = 3; + uint32 rec_limit = 4; + uint32 rec_count = 5; + string path = 6; + string id = 7; + bool is_def = 8; + AllocStatsData stats = 9; + string repo_type = 10; +} diff --git a/common/proto3/common/messages/alloc_stats_data.proto b/common/proto3/common/messages/alloc_stats_data.proto new file mode 100644 index 000000000..5a62c8d10 --- /dev/null +++ b/common/proto3/common/messages/alloc_stats_data.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message AllocStatsData { + string repo = 1; + uint32 rec_count = 2; + uint32 file_count = 3; + uint64 data_size = 4; + repeated uint32 histogram = 5; +} diff --git a/common/proto3/common/messages/coll_data.proto b/common/proto3/common/messages/coll_data.proto new file mode 100644 index 000000000..f865483ed --- /dev/null +++ b/common/proto3/common/messages/coll_data.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message CollData { + string id = 1; + string title = 2; + string alias = 3; + string desc = 4; + repeated string tags = 5; + string topic = 6; + string owner = 7; + string creator = 8; + uint32 ct = 9; + uint32 ut = 10; + string parent_id = 11; + uint32 notes = 12; +} diff --git a/common/proto3/common/messages/dependency_data.proto b/common/proto3/common/messages/dependency_data.proto new file mode 100644 index 000000000..9a598c7cd --- /dev/null +++ b/common/proto3/common/messages/dependency_data.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/dependency_type.proto"; +import "enums/dependency_dir.proto"; + +option 
cc_enable_arenas = true; + +message DependencyData { + string id = 1; + string alias = 2; + uint32 notes = 3; + DependencyType type = 4; + DependencyDir dir = 5; +} diff --git a/common/proto3/common/messages/dependency_spec_data.proto b/common/proto3/common/messages/dependency_spec_data.proto new file mode 100644 index 000000000..91a4afae9 --- /dev/null +++ b/common/proto3/common/messages/dependency_spec_data.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/dependency_type.proto"; + +option cc_enable_arenas = true; + +message DependencySpecData { + string id = 1; + DependencyType type = 2; +} diff --git a/common/proto3/common/messages/group_data.proto b/common/proto3/common/messages/group_data.proto new file mode 100644 index 000000000..26773a99c --- /dev/null +++ b/common/proto3/common/messages/group_data.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message GroupData { + string uid = 1; + string gid = 2; + string title = 3; + string desc = 4; + repeated string member = 5; +} diff --git a/common/proto3/common/messages/listing_data.proto b/common/proto3/common/messages/listing_data.proto new file mode 100644 index 000000000..4e1a43aba --- /dev/null +++ b/common/proto3/common/messages/listing_data.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/dependency_data.proto"; + +option cc_enable_arenas = true; + +message ListingData { + string id = 1; + string title = 2; + string alias = 3; + bool locked = 4; + string owner = 5; + string creator = 6; + double size = 7; + bool external = 8; + uint32 notes = 9; + int32 gen = 10; + bool deps_avail = 11; + repeated DependencyData dep = 12; + string owner_name = 13; + string desc = 14; +} diff --git a/common/proto3/common/messages/note_comment.proto b/common/proto3/common/messages/note_comment.proto new file mode 100644 index 000000000..5cb7f40a2 --- /dev/null +++ b/common/proto3/common/messages/note_comment.proto @@ 
-0,0 +1,16 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/note_type.proto"; +import "enums/note_state.proto"; + +option cc_enable_arenas = true; + +message NoteComment { + string user = 1; + uint32 time = 2; + string comment = 3; + NoteType type = 4; + NoteState state = 5; +} diff --git a/common/proto3/common/messages/note_data.proto b/common/proto3/common/messages/note_data.proto new file mode 100644 index 000000000..33c5166f3 --- /dev/null +++ b/common/proto3/common/messages/note_data.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/note_type.proto"; +import "enums/note_state.proto"; +import "messages/note_comment.proto"; + +option cc_enable_arenas = true; + +message NoteData { + string id = 1; + NoteType type = 2; + NoteState state = 3; + string subject_id = 4; + string title = 5; + repeated NoteComment comment = 6; + uint32 ct = 7; + uint32 ut = 8; + string parent_id = 9; + bool has_child = 10; +} diff --git a/common/proto3/common/messages/path_data.proto b/common/proto3/common/messages/path_data.proto new file mode 100644 index 000000000..d340ca25d --- /dev/null +++ b/common/proto3/common/messages/path_data.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/listing_data.proto"; + +option cc_enable_arenas = true; + +message PathData { + repeated ListingData item = 1; +} diff --git a/common/proto3/common/messages/project_data.proto b/common/proto3/common/messages/project_data.proto new file mode 100644 index 000000000..21e54f65d --- /dev/null +++ b/common/proto3/common/messages/project_data.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_data.proto"; + +option cc_enable_arenas = true; + +message ProjectData { + string id = 1; + string title = 2; + string desc = 3; + string owner = 4; + uint32 ct = 5; + uint32 ut = 6; + repeated string admin = 7; + repeated string member = 8; + repeated AllocData alloc = 9; +} diff --git 
a/common/proto3/common/messages/record_data.proto b/common/proto3/common/messages/record_data.proto new file mode 100644 index 000000000..dd3d15c1e --- /dev/null +++ b/common/proto3/common/messages/record_data.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/dependency_data.proto"; + +option cc_enable_arenas = true; + +message RecordData { + string id = 1; + string title = 2; + string alias = 3; + string desc = 4; + repeated string tags = 5; + string metadata = 6; + bool external = 7; + string source = 8; + string repo_id = 9; + double size = 10; + string ext = 11; + bool ext_auto = 12; + uint32 ct = 13; + uint32 ut = 14; + uint32 dt = 15; + string owner = 16; + string creator = 17; + bool locked = 18; + string parent_id = 19; + repeated DependencyData deps = 20; + uint32 notes = 21; + string md_err_msg = 22; + string sch_id = 23; + uint32 sch_ver = 24; +} diff --git a/common/proto3/common/messages/record_data_location.proto b/common/proto3/common/messages/record_data_location.proto new file mode 100644 index 000000000..d4f19a891 --- /dev/null +++ b/common/proto3/common/messages/record_data_location.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordDataLocation { + string id = 1; + string path = 2; +} diff --git a/common/proto3/common/messages/record_data_size.proto b/common/proto3/common/messages/record_data_size.proto new file mode 100644 index 000000000..2a6821703 --- /dev/null +++ b/common/proto3/common/messages/record_data_size.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message RecordDataSize { + string id = 1; + double size = 2; +} diff --git a/common/proto3/common/messages/repo_data.proto b/common/proto3/common/messages/repo_data.proto new file mode 100644 index 000000000..765c09d35 --- /dev/null +++ b/common/proto3/common/messages/repo_data.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +option 
cc_enable_arenas = true; + +message RepoData { + string id = 1; + string title = 2; + string desc = 3; + uint64 capacity = 4; + string pub_key = 5; + string address = 6; + string endpoint = 7; + string path = 8; + string domain = 9; + string exp_path = 10; + repeated string admin = 11; + string type = 12; +} diff --git a/common/proto3/common/messages/repo_record_data_locations.proto b/common/proto3/common/messages/repo_record_data_locations.proto new file mode 100644 index 000000000..7a4a7edab --- /dev/null +++ b/common/proto3/common/messages/repo_record_data_locations.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/record_data_location.proto"; + +option cc_enable_arenas = true; + +message RepoRecordDataLocations { + string repo_id = 1; + repeated RecordDataLocation loc = 2; +} diff --git a/common/proto3/common/messages/schema_data.proto b/common/proto3/common/messages/schema_data.proto new file mode 100644 index 000000000..d4153eb61 --- /dev/null +++ b/common/proto3/common/messages/schema_data.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message SchemaData { + string id = 1; + uint32 ver = 2; + bool depr = 3; + uint32 cnt = 4; + bool ref = 5; + string own_id = 6; + string own_nm = 7; + string desc = 8; + bool pub = 9; + string def = 10; + repeated SchemaData uses = 11; + repeated SchemaData used_by = 12; +} diff --git a/common/proto3/common/messages/tag_data.proto b/common/proto3/common/messages/tag_data.proto new file mode 100644 index 000000000..6dfdb2e69 --- /dev/null +++ b/common/proto3/common/messages/tag_data.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TagData { + string name = 1; + uint64 count = 2; +} diff --git a/common/proto3/common/messages/task_data.proto b/common/proto3/common/messages/task_data.proto new file mode 100644 index 000000000..2c569692c --- /dev/null +++ 
b/common/proto3/common/messages/task_data.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package SDMS; + +import "enums/task_type.proto"; +import "enums/task_status.proto"; + +option cc_enable_arenas = true; + +message TaskData { + string id = 1; + TaskType type = 2; + TaskStatus status = 3; + string client = 4; + uint32 step = 5; + uint32 steps = 6; + string msg = 7; + uint32 ct = 8; + uint32 ut = 9; + string source = 10; + string dest = 11; +} diff --git a/common/proto3/common/messages/topic_data.proto b/common/proto3/common/messages/topic_data.proto new file mode 100644 index 000000000..56e6cafb9 --- /dev/null +++ b/common/proto3/common/messages/topic_data.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package SDMS; + +option cc_enable_arenas = true; + +message TopicData { + string id = 1; + string title = 2; + repeated TopicData path = 3; + string desc = 4; + string creator = 5; + bool admin = 6; + uint32 coll_cnt = 7; +} diff --git a/common/proto3/common/messages/user_data.proto b/common/proto3/common/messages/user_data.proto new file mode 100644 index 000000000..9b35f1f5d --- /dev/null +++ b/common/proto3/common/messages/user_data.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package SDMS; + +import "messages/alloc_data.proto"; + +option cc_enable_arenas = true; + +message UserData { + string uid = 1; + string name_last = 2; + string name_first = 3; + string email = 4; + string options = 5; + bool is_admin = 6; + bool is_repo_admin = 7; + repeated string ident = 8; + repeated AllocData alloc = 9; +} From f1ed2f9dee33dd7661d805f63cdef0ad4291433d Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Tue, 3 Feb 2026 03:02:05 -0500 Subject: [PATCH 50/65] updated submodules --- external/DataFedDependencies | 2 +- external/globus-connect-server-deploy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/external/DataFedDependencies b/external/DataFedDependencies index fe59a393f..0e9a5bb75 160000 --- a/external/DataFedDependencies +++ 
b/external/DataFedDependencies @@ -1 +1 @@ -Subproject commit fe59a393f54d3aa1b8bf551f97d274b762bf93d2 +Subproject commit 0e9a5bb75558d09ac50ac632160fc8f08e5fc873 diff --git a/external/globus-connect-server-deploy b/external/globus-connect-server-deploy index ff7167860..436b396c4 160000 --- a/external/globus-connect-server-deploy +++ b/external/globus-connect-server-deploy @@ -1 +1 @@ -Subproject commit ff7167860345e9b994110dfabdb251fe4dea8c00 +Subproject commit 436b396c4da6d141c9c0534b297b5e43cc9ac35c From 51df50ed45c3be09d6c8d5559db8f8c685e6b88d Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 4 Feb 2026 09:01:02 -0500 Subject: [PATCH 51/65] [DAPS-1839] - Fix: ci image retag (#1840) * [DAPS-1825] - upgrade (dependencies) pin to new datafed dependencies submodule with updated openssl (#1826) --- .gitlab/build/build_core_image.yml | 53 ---------- .gitlab/build/build_foxx_image.yml | 56 ---------- .gitlab/build/build_gcs_base_image.yml | 73 ------------- .gitlab/build/build_gcs_image.yml | 88 ---------------- .gitlab/build/build_repo_image.yml | 55 ---------- .gitlab/build/build_ws_image.yml | 57 ----------- .gitlab/build/retag_core_image.yml | 18 ++++ .gitlab/build/retag_foxx_image.yml | 18 ++++ .gitlab/build/retag_gcs_base_image.yml | 18 ++++ .gitlab/build/retag_gcs_image.yml | 19 ++++ .gitlab/build/retag_repo_image.yml | 18 ++++ .gitlab/build/retag_ws_image.yml | 20 ++++ .gitlab/common.yml | 135 ++++++++++++++++++++++--- .gitlab/stage_image_check.yml | 8 +- external/DataFedDependencies | 2 +- 15 files changed, 239 insertions(+), 399 deletions(-) delete mode 100644 .gitlab/build/build_core_image.yml delete mode 100644 .gitlab/build/build_foxx_image.yml delete mode 100644 .gitlab/build/build_gcs_base_image.yml delete mode 100644 .gitlab/build/build_gcs_image.yml delete mode 100644 .gitlab/build/build_repo_image.yml delete mode 100644 .gitlab/build/build_ws_image.yml create mode 100644 .gitlab/build/retag_core_image.yml create mode 100644 
.gitlab/build/retag_foxx_image.yml create mode 100644 .gitlab/build/retag_gcs_base_image.yml create mode 100644 .gitlab/build/retag_gcs_image.yml create mode 100644 .gitlab/build/retag_repo_image.yml create mode 100644 .gitlab/build/retag_ws_image.yml diff --git a/.gitlab/build/build_core_image.yml b/.gitlab/build/build_core_image.yml deleted file mode 100644 index fcc80d443..000000000 --- a/.gitlab/build/build_core_image.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-core: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "core" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "core/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-core - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - core/**/* - - common/**/* - - CMakeLists.txt - - cmake/**/* - - .gitlab-ci.yml - when: on_success - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "core" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - core/**/* - - common/**/* - - CMakeLists.txt - - cmake/**/* - - .gitlab-ci.yml - when: never - - when: on_success diff --git a/.gitlab/build/build_foxx_image.yml b/.gitlab/build/build_foxx_image.yml deleted file mode 100644 index c0b45a74a..000000000 --- a/.gitlab/build/build_foxx_image.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-foxx: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "foxx" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "docker/Dockerfile.foxx" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - 
BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - cmake/**/* - - core/database/**/* - - core/CMakeLists.txt - - common/proto/**/* - - .gitlab-ci.yml - - .gitlab/**/* - - CMakeLists.txt - when: on_success - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "foxx" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - cmake/**/* - - core/database/**/* - - core/CMakeLists.txt - - common/proto/**/* - - .gitlab-ci.yml - - .gitlab/**/* - - CMakeLists.txt - when: never - - when: on_success diff --git a/.gitlab/build/build_gcs_base_image.yml b/.gitlab/build/build_gcs_base_image.yml deleted file mode 100644 index a437f7502..000000000 --- a/.gitlab/build/build_gcs_base_image.yml +++ /dev/null @@ -1,73 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-gcs-base: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs-base" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - GCS_BASE_IMAGE_DISTRO: "debian-12" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - when: on_success - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - source "scripts/dependency_versions.sh" - - cd "external/globus-connect-server-deploy/docker" - - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < 
"./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" - - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - cd "${CI_PROJECT_DIR}" - - | - while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs-base" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - when: never - - when: on_success diff --git a/.gitlab/build/build_gcs_image.yml b/.gitlab/build/build_gcs_image.yml deleted file mode 100644 index 4d53e04eb..000000000 --- a/.gitlab/build/build_gcs_image.yml +++ /dev/null @@ -1,88 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-gcs: - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* 
- - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - .gitlab/build/build_gcs_base_image.sh - - .gitlab/build/build_gcs_image.sh - - .gitlab/stage_build_base.sh - - external/globus-connect-server/**/* - - CMakeLists.txt - - cmake/**/* - - repository/docker/entrypoint_authz.sh - - repository/docker/Dockerfile.gcs - - repository/CMakeLists.txt - - repository/gridftp/**/* - when: on_success - script: - - BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') - - echo "$BRANCH_LOWER" - - "${CI_PROJECT_DIR}/scripts/generate_datafed.sh" - - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - DOWNSTREAM_SHA=$( git submodule status ./external/DataFedDependencies/ | awk '{print $1}' ) - - DOWNSTREAM_SHA=${DOWNSTREAM_SHA#-} - - docker build --build-arg DEPENDENCIES="${REGISTRY}/datafed/dependencies:$DOWNSTREAM_SHA" --build-arg RUNTIME="${REGISTRY}/datafed/runtime-${BRANCH_LOWER}:latest" --build-arg GCS_IMAGE="${REGISTRY}/datafed/gcs-base-${BRANCH_LOWER}:latest" -f repository/docker/Dockerfile.gcs -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" . - - docker tag "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:$CI_COMMIT_SHA" - - export DATAFED_HARBOR_REPOSITORY="${COMPONENT}-${BRANCH_LOWER}" - - export DATAFED_HARBOR_USERNAME="${HARBOR_USER}" - - export DATAFED_HARBOR_PASSWORD="${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - - | - while [ "$(${CI_PROJECT_DIR}/scripts/ci_harbor_artifact_count.sh -r ${DATAFED_HARBOR_REPOSITORY})" == "0" ]; do - echo "Artifact missing from harbor..." 
- docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:latest" - docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" - sleep 5 # Optional: Add a sleep to avoid busy waiting - done - - cat "${CI_PROJECT_DIR}/harbor_check.log" - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "gcs" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-globus - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/**/* - - .gitlab-ci.yml - - .gitlab/build/build_gcs_base_image.sh - - .gitlab/build/build_gcs_image.sh - - .gitlab/stage_build_base.sh - - external/globus-connect-server/**/* - - CMakeLists.txt - - cmake/**/* - - repository/docker/entrypoint_authz.sh - - repository/docker/Dockerfile.gcs - - repository/CMakeLists.txt - - repository/gridftp/**/* - when: never - - when: on_success diff --git a/.gitlab/build/build_repo_image.yml b/.gitlab/build/build_repo_image.yml deleted file mode 100644 index 9ee9fd4b5..000000000 --- a/.gitlab/build/build_repo_image.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-repo: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "repo" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "repository/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - ci-datafed-repo - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - - repository/CMakeLists.txt - - repository/server/**/* - when: on_success - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "repo" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by 
c_harbor_artifact_count - BUILD_INTERMEDIATE: "FALSE" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - - cmake/**/* - - repository/CMakeLists.txt - - repository/server/**/* - when: never - - when: on_success diff --git a/.gitlab/build/build_ws_image.yml b/.gitlab/build/build_ws_image.yml deleted file mode 100644 index 9ad5767c2..000000000 --- a/.gitlab/build/build_ws_image.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -stages: - - build - -include: - - local: .gitlab/common.yml - -build-ws: - extends: .docker_build_script - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "ws" - GIT_STRATEGY: clone - DOCKER_FILE_PATH: "web/docker/Dockerfile" - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "TRUE" - INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile - INTERMEDIATE_LAYER_NAME: "build" - tags: - - ci-datafed-core - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - web/**/* - - cmake/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - when: on_success - -retag-image: - extends: .docker_retag_image - stage: build - variables: - PROJECT: "datafed" - COMPONENT: "ws" - GIT_STRATEGY: clone - DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count - BUILD_INTERMEDIATE: "TRUE" - INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile - INTERMEDIATE_LAYER_NAME: "build" - tags: - - docker - rules: - - changes: - - docker/**/* - - scripts/**/* - - web/**/* - - cmake/**/* - - common/proto/**/* - - .gitlab-ci.yml - - CMakeLists.txt - when: never - - when: on_success diff --git a/.gitlab/build/retag_core_image.yml b/.gitlab/build/retag_core_image.yml new file mode 100644 index 000000000..8b719df82 --- /dev/null +++ b/.gitlab/build/retag_core_image.yml @@ -0,0 +1,18 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: 
.docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "core" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker diff --git a/.gitlab/build/retag_foxx_image.yml b/.gitlab/build/retag_foxx_image.yml new file mode 100644 index 000000000..fd9ab2021 --- /dev/null +++ b/.gitlab/build/retag_foxx_image.yml @@ -0,0 +1,18 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "foxx" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker diff --git a/.gitlab/build/retag_gcs_base_image.yml b/.gitlab/build/retag_gcs_base_image.yml new file mode 100644 index 000000000..629f490b0 --- /dev/null +++ b/.gitlab/build/retag_gcs_base_image.yml @@ -0,0 +1,18 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs-base" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker diff --git a/.gitlab/build/retag_gcs_image.yml b/.gitlab/build/retag_gcs_image.yml new file mode 100644 index 000000000..e3c6fec2b --- /dev/null +++ b/.gitlab/build/retag_gcs_image.yml @@ -0,0 +1,19 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "gcs" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - ci-datafed-globus + - docker diff --git a/.gitlab/build/retag_repo_image.yml b/.gitlab/build/retag_repo_image.yml new file mode 100644 index 
000000000..e0e722a18 --- /dev/null +++ b/.gitlab/build/retag_repo_image.yml @@ -0,0 +1,18 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "repo" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "FALSE" + tags: + - docker diff --git a/.gitlab/build/retag_ws_image.yml b/.gitlab/build/retag_ws_image.yml new file mode 100644 index 000000000..c97a6785b --- /dev/null +++ b/.gitlab/build/retag_ws_image.yml @@ -0,0 +1,20 @@ +--- +stages: + - build + +include: + - local: .gitlab/common.yml + +retag-image: + extends: .docker_retag_image + stage: build + variables: + PROJECT: "datafed" + COMPONENT: "ws" + GIT_STRATEGY: clone + DATAFED_HARBOR_REGISTRY: "$REGISTRY" # needed by c_harbor_artifact_count + BUILD_INTERMEDIATE: "TRUE" + INTERMEDIATE_TARGET: "ws-build" # Name of the layer in the dockerfile + INTERMEDIATE_LAYER_NAME: "build" + tags: + - docker diff --git a/.gitlab/common.yml b/.gitlab/common.yml index ee42bffbd..f24889144 100644 --- a/.gitlab/common.yml +++ b/.gitlab/common.yml @@ -55,7 +55,6 @@ # # INTERMEDIATE_LAYER_NAME # This is used when tagging the image to help differentiate different layers - .image_check: tags: - docker @@ -64,33 +63,139 @@ BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" FORCE_BUILD="FALSE" + REPO_NAME="${COMPONENT}-${BRANCH_LOWER}" + IMAGE_PATH="${REGISTRY}/${PROJECT}/${REPO_NAME}" + + # Check if image exists set +e - docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; + docker pull --quiet "${IMAGE_PATH}:latest" + IMAGE_EXISTS=$? 
set -e - if [ "${BUILD_INTERMEDIATE}" == "TRUE" ] - then - set +e - docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${INTERMEDIATE_LAYER_NAME}-${BRANCH_LOWER}:latest" - if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; - set -e + + if [ $IMAGE_EXISTS -ne 0 ]; then + echo "Image does not exist, forcing build" + FORCE_BUILD="TRUE" + else + echo "Image exists, checking for changes in watched files" + + # Get commit SHA from latest image via Harbor API + HARBOR_API_RESPONSE=$(curl -sf -u "${HARBOR_USER}:${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" \ + "https://${REGISTRY}/api/v2.0/projects/${PROJECT}/repositories/${REPO_NAME}/artifacts?page_size=1&sort=-push_time") + + if [ $? -ne 0 ] || [ -z "$HARBOR_API_RESPONSE" ]; then + echo "WARNING: Harbor API request failed, forcing build" + FORCE_BUILD="TRUE" + else + LATEST_IMAGE_SHA=$(echo "$HARBOR_API_RESPONSE" \ + | jq -r '.[0].tags // [] | .[].name' \ + | grep -E '^[a-f0-9]{40}$' \ + | head -1) + + if [ -z "$LATEST_IMAGE_SHA" ]; then + echo "WARNING: No commit SHA tag found on latest image, forcing build" + FORCE_BUILD="TRUE" + elif [ "$LATEST_IMAGE_SHA" == "$CI_COMMIT_SHA" ]; then + echo "Current commit matches latest image, no rebuild needed" + else + echo "Latest image built from: $LATEST_IMAGE_SHA" + echo "Current commit: $CI_COMMIT_SHA" + + # Fetch full history to ensure old commit is available + git fetch --unshallow 2>/dev/null || git fetch origin "$LATEST_IMAGE_SHA" 2>/dev/null || true + + # Verify commit exists in history + set +e + git cat-file -e "$LATEST_IMAGE_SHA" 2>/dev/null + COMMIT_EXISTS=$? 
+ set -e + + if [ $COMMIT_EXISTS -ne 0 ]; then + echo "WARNING: Commit $LATEST_IMAGE_SHA not in git history, forcing build" + FORCE_BUILD="TRUE" + else + if [ -z "$WATCHED_PATHS" ]; then + echo "ERROR: WATCHED_PATHS not defined" + exit 1 + fi + + DIFF_OUTPUT=$(git diff --name-only "$LATEST_IMAGE_SHA" "$CI_COMMIT_SHA" -- $WATCHED_PATHS) + + if [ -n "$DIFF_OUTPUT" ]; then + echo "Changes detected in watched files:" + echo "$DIFF_OUTPUT" + FORCE_BUILD="TRUE" + else + echo "No changes in watched files since $LATEST_IMAGE_SHA" + fi + fi + fi + fi + fi + + if [ "${BUILD_INTERMEDIATE}" == "TRUE" ] && [ "$FORCE_BUILD" == "FALSE" ]; then + set +e + docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${INTERMEDIATE_LAYER_NAME}-${BRANCH_LOWER}:latest" + if [ $? -ne 0 ]; then + echo "Intermediate image missing, forcing build" + FORCE_BUILD="TRUE" + fi + set -e fi - if [ "$FORCE_BUILD" == "TRUE" ] - then - cp .gitlab/build/force_build_${COMPONENT}_image.yml ${COMPONENT}_image.yml + + # File names are all underscores + FILE_COMPONENT=$(echo "$COMPONENT" | tr '-' '_') + + if [ "$FORCE_BUILD" == "TRUE" ]; then + cp .gitlab/build/force_build_${FILE_COMPONENT}_image.yml ${FILE_COMPONENT}_image.yml else - cp .gitlab/build/build_${COMPONENT}_image.yml ${COMPONENT}_image.yml + cp .gitlab/build/retag_${FILE_COMPONENT}_image.yml ${FILE_COMPONENT}_image.yml fi + echo "REGISTRY=${REGISTRY}" >> build.env echo "HARBOR_USER=${HARBOR_USER}" >> build.env echo "HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN=${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" >> build.env sed -i 's/\(HARBOR_USER=.*\)\$/\1$$/g' build.env artifacts: paths: - - ${COMPONENT}_image.yml + - "*_image.yml" reports: dotenv: build.env +#.image_check: +# tags: +# - docker +# script: +# - | +# BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' '[:lower:]') +# docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" +# FORCE_BUILD="FALSE" +# set +e +# docker pull --quiet 
"${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" +# if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; +# set -e +# if [ "${BUILD_INTERMEDIATE}" == "TRUE" ] +# then +# set +e +# docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${INTERMEDIATE_LAYER_NAME}-${BRANCH_LOWER}:latest" +# if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; +# set -e +# fi +# if [ "$FORCE_BUILD" == "TRUE" ] +# then +# cp .gitlab/build/force_build_${COMPONENT}_image.yml ${COMPONENT}_image.yml +# else +# cp .gitlab/build/build_${COMPONENT}_image.yml ${COMPONENT}_image.yml +# fi +# echo "REGISTRY=${REGISTRY}" >> build.env +# echo "HARBOR_USER=${HARBOR_USER}" >> build.env +# echo "HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN=${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" >> build.env +# sed -i 's/\(HARBOR_USER=.*\)\$/\1$$/g' build.env +# artifacts: +# paths: +# - ${COMPONENT}_image.yml +# reports: +# dotenv: build.env + # The purpose of this anchor is to check that an image has been uploaded correctly # to the registry and if it has not attempt to upload it again. 
# @@ -118,7 +223,7 @@ docker push "${REGISTRY}/${PROJECT}/${DATAFED_HARBOR_REPOSITORY}:$CI_COMMIT_SHA" if [ "$first_iteration" == "FALSE" ] then - sleep 5 # Optional: Add a sleep to avoid busy waiting + sleep 5 # Optional: Add a sleep to avoid busy waiting else first_iteration="FALSE" fi diff --git a/.gitlab/stage_image_check.yml b/.gitlab/stage_image_check.yml index 10028b407..d29b8209f 100644 --- a/.gitlab/stage_image_check.yml +++ b/.gitlab/stage_image_check.yml @@ -10,6 +10,7 @@ check-ws-image: COMPONENT: "ws" BUILD_INTERMEDIATE: "TRUE" INTERMEDIATE_LAYER_NAME: "build" + WATCHED_PATHS: "docker scripts web cmake common/proto .gitlab-ci.yml CMakeLists.txt" check-core-image: extends: .image_check @@ -18,6 +19,7 @@ check-core-image: PROJECT: "datafed" COMPONENT: "core" BUILD_INTERMEDIATE: "FALSE" + WATCHED_PATHS: "docker scripts core common CMakeLists.txt cmake .gitlab-ci.yml" check-repo-image: extends: .image_check @@ -26,14 +28,16 @@ check-repo-image: PROJECT: "datafed" COMPONENT: "repo" BUILD_INTERMEDIATE: "FALSE" + WATCHED_PATHS: "docker scripts common/proto .gitlab-ci.yml CMakeLists.txt cmake repository/CMakeLists.txt repository/server" check-gcs-base-image: extends: .image_check stage: image-check variables: PROJECT: "datafed" - COMPONENT: "gcs_base" + COMPONENT: "gcs-base" BUILD_INTERMEDIATE: "FALSE" + WATCHED_PATHS: "docker scripts common .gitlab-ci.yml CMakeLists.txt cmake" check-gcs-image: extends: .image_check @@ -42,6 +46,7 @@ check-gcs-image: PROJECT: "datafed" COMPONENT: "gcs" BUILD_INTERMEDIATE: "FALSE" + WATCHED_PATHS: "docker scripts common .gitlab-ci.yml .gitlab/build/build_gcs_base_image.sh .gitlab/build/build_gcs_image.sh .gitlab/stage_build_base.sh external/globus-connect-server CMakeLists.txt cmake repository/docker/entrypoint_authz.sh repository/docker/Dockerfile.gcs repository/CMakeLists.txt repository/gridftp" check-foxx-image: extends: .image_check @@ -50,3 +55,4 @@ check-foxx-image: PROJECT: "datafed" COMPONENT: "foxx" BUILD_INTERMEDIATE: 
"FALSE" + WATCHED_PATHS: "docker scripts cmake core/database core/CMakeLists.txt common/proto .gitlab-ci.yml .gitlab CMakeLists.txt" diff --git a/external/DataFedDependencies b/external/DataFedDependencies index fe59a393f..0e9a5bb75 160000 --- a/external/DataFedDependencies +++ b/external/DataFedDependencies @@ -1 +1 @@ -Subproject commit fe59a393f54d3aa1b8bf551f97d274b762bf93d2 +Subproject commit 0e9a5bb75558d09ac50ac632160fc8f08e5fc873 From f0d75aee9c537115ef4554fd3465d0096da75ff8 Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 4 Feb 2026 09:55:50 -0500 Subject: [PATCH 52/65] [DAPS-1522] - refactor: router logging cleanup (#1838) Co-authored-by: JoshuaSBrown --- core/database/foxx/api/acl_router.js | 6 ++-- core/database/foxx/api/authz.js | 12 ------- core/database/foxx/api/authz_router.js | 4 +-- core/database/foxx/api/lib/logger.js | 6 ++-- .../models/repositories/base_repository.js | 7 ---- core/database/foxx/api/note_router.js | 9 +++-- core/database/foxx/api/repo_router.js | 7 ++-- core/database/foxx/api/support.js | 13 ------- core/database/foxx/api/tasks.js | 35 ------------------- core/database/foxx/tests/acl_router.test.js | 2 +- core/database/foxx/tests/authz_router.test.js | 22 ++++++++++++ core/database/foxx/tests/coll_router.test.js | 1 + core/database/foxx/tests/query_router.test.js | 1 - core/database/foxx/tests/repo_router.test.js | 9 ++++- .../database/foxx/tests/schema_router.test.js | 8 +++++ core/database/foxx/tests/task_router.test.js | 7 ++++ 16 files changed, 64 insertions(+), 85 deletions(-) diff --git a/core/database/foxx/api/acl_router.js b/core/database/foxx/api/acl_router.js index ac0963bb7..995b18020 100644 --- a/core/database/foxx/api/acl_router.js +++ b/core/database/foxx/api/acl_router.js @@ -200,7 +200,7 @@ router client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", - routePath: basePath + "/authn/password", + routePath: basePath + 
"/update", status: "Success", description: `Update ACL(s) on a data record or collection. ID: ${req.queryParams.id}`, extra: result, @@ -210,8 +210,8 @@ router client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", - routePath: basePath + "/authn/password", - status: "Success", + routePath: basePath + "/update", + status: "Failure", description: `Update ACL(s) on a data record or collection. ID: ${req.queryParams.id}`, extra: result, error: e, diff --git a/core/database/foxx/api/authz.js b/core/database/foxx/api/authz.js index 8ecbd8227..4977f9a30 100644 --- a/core/database/foxx/api/authz.js +++ b/core/database/foxx/api/authz.js @@ -56,7 +56,6 @@ module.exports = (function () { let record = new Record(data_key); if (!record.exists()) { // Return not found error for non-existent records - console.log("AUTHZ act: read client: " + client._id + " path " + path + " NOT_FOUND"); throw [error.ERR_NOT_FOUND, "Record not found: " + path]; } @@ -64,14 +63,12 @@ module.exports = (function () { // if record exists and if it is a public record if (!client) { if (!g_lib.hasPublicRead(record.id())) { - console.log("AUTHZ act: read" + " unknown client " + " path " + path + " FAILED"); throw [ error.ERR_PERM_DENIED, "Unknown client does not have read permissions on " + path, ]; } } else if (!obj.isRecordActionAuthorized(client, data_key, permission)) { - console.log("AUTHZ act: read" + " client: " + client._id + " path " + path + " FAILED"); throw [ error.ERR_PERM_DENIED, "Client " + client._id + " does not have read permissions on " + path, @@ -79,7 +76,6 @@ module.exports = (function () { } if (!record.isPathConsistent(path)) { - console.log("AUTHZ act: read client: " + client._id + " path " + path + " FAILED"); throw [record.error(), record.errorMessage()]; } }; @@ -98,17 +94,11 @@ module.exports = (function () { const data_key = path_components.at(-1); if (!client) { - console.log( - "AUTHZ act: create" + " client: " + client._id + " 
path " + path + " FAILED", - ); throw [ error.ERR_PERM_DENIED, "Unknown client does not have create permissions on " + path, ]; } else if (!obj.isRecordActionAuthorized(client, data_key, permission)) { - console.log( - "AUTHZ act: create" + " client: " + client._id + " path " + path + " FAILED", - ); throw [ error.ERR_PERM_DENIED, "Client " + client._id + " does not have create permissions on " + path, @@ -120,14 +110,12 @@ module.exports = (function () { // exists in the database. if (!record.exists()) { // If the record does not exist then the path would not be consistent. - console.log("AUTHZ act: create client: " + client._id + " path " + path + " FAILED"); throw [error.ERR_PERM_DENIED, "Invalid record specified: " + path]; } // This will tell us if the proposed path is consistent with what we expect // GridFTP will fail if the posix file path does not exist. if (!record.isPathConsistent(path)) { - console.log("AUTHZ act: create client: " + client._id + " path " + path + " FAILED"); throw [record.error(), record.errorMessage()]; } }; diff --git a/core/database/foxx/api/authz_router.js b/core/database/foxx/api/authz_router.js index 7600669d5..3597cecf0 100644 --- a/core/database/foxx/api/authz_router.js +++ b/core/database/foxx/api/authz_router.js @@ -279,7 +279,7 @@ router status: "Success", description: "Gets client permissions for object. Permissions:" + req.queryParams.perms, - extra: result, + extra: `Object ID: ${req.queryParams.id}`, }); } catch (e) { logger.logRequestFailure({ @@ -290,7 +290,7 @@ router status: "Failure", description: "Gets client permissions for object. 
Permissions:" + req.queryParams.perms, - extra: result, + extra: `Object ID: ${req.queryParams.id}`, error: e, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/api/lib/logger.js b/core/database/foxx/api/lib/logger.js index c410a8040..8098dbee5 100644 --- a/core/database/foxx/api/lib/logger.js +++ b/core/database/foxx/api/lib/logger.js @@ -11,7 +11,7 @@ function logRequestSuccess({ extra, }) { // helper to pad fields - const pad = (label, value, length = 20) => + const pad = (label, value, length = 10) => `${label}: ${value || "unknown"}`.padEnd(length, " "); console.info( @@ -42,7 +42,7 @@ function logRequestFailure({ error, }) { // helper to pad fields - const pad = (label, value, length = 20) => + const pad = (label, value, length = 10) => `${label}: ${value || "unknown"}`.padEnd(length, " "); //PUT IF STATEMENT if (g_lib.isInteger(error) || Array.isArray(error)) { @@ -88,7 +88,7 @@ function logRequestFailure({ function logRequestStarted({ client, correlationId, httpVerb, routePath, status, description }) { // helper to pad fields - const pad = (label, value, length = 20) => + const pad = (label, value, length = 10) => `${label}: ${value || "unknown"}`.padEnd(length, " "); console.info( diff --git a/core/database/foxx/api/models/repositories/base_repository.js b/core/database/foxx/api/models/repositories/base_repository.js index f11878805..20674abea 100644 --- a/core/database/foxx/api/models/repositories/base_repository.js +++ b/core/database/foxx/api/models/repositories/base_repository.js @@ -90,8 +90,6 @@ class BaseRepository { if (g_db._exists(config.id)) { const existingDoc = g_db.repo.document(config.key); const { _id, _key, _rev, ...temp } = existingDoc; - console.log("existingData found"); - console.log(existingDoc); this.repoData = { id: existingDoc._id, key: existingDoc._key, @@ -101,8 +99,6 @@ class BaseRepository { } else { this.repoData = new_repo_data; } - console.log("Repo data after deepMerge"); - console.log(this.repoData); } 
catch { return Result.err({ code: error.ERR_INVALID_PARAM, @@ -111,9 +107,6 @@ class BaseRepository { } } else { this.repoData = { ...new_repo_data, id: `repo/${config.key}`, key: config.key }; - - console.log("Repo data, now that we know document doesn't exist"); - console.log(this.repoData); } return Result.ok(this); diff --git a/core/database/foxx/api/note_router.js b/core/database/foxx/api/note_router.js index 5eb4ab54d..cefbcdff7 100644 --- a/core/database/foxx/api/note_router.js +++ b/core/database/foxx/api/note_router.js @@ -614,7 +614,10 @@ router httpVerb: "GET", routePath: basePath + "/purge", status: "Started", - description: "Purge old closed annotations older than " + req.queryParams.age_sec, + description: + "Purge old closed annotations older than " + + req.queryParams.age_sec + + " seconds", }); g_db._executeTransaction({ @@ -653,7 +656,7 @@ router description: "Purge old closed annotations older than " + req.queryParams.age_sec + - "seconds.", + " seconds.", extra: `Ids of purged notes: ${purgedIds.join(", ")}`, }); } catch (e) { @@ -666,7 +669,7 @@ router description: "Purge old closed annotations older than " + req.queryParams.age_sec + - "seconds.", + " seconds.", extra: { last_purged_note: id }, error: e, }); diff --git a/core/database/foxx/api/repo_router.js b/core/database/foxx/api/repo_router.js index 45e9ffae2..27b8b4d94 100644 --- a/core/database/foxx/api/repo_router.js +++ b/core/database/foxx/api/repo_router.js @@ -194,7 +194,7 @@ router httpVerb: "GET", routePath: basePath + "/create", status: "Started", - description: `Create a server record: ${req.queryParams.id}`, + description: `Create a server record: ${req.body.id}`, }); g_db._executeTransaction({ @@ -255,7 +255,7 @@ router httpVerb: "GET", routePath: basePath + "/create", status: "Success", - description: `Create a server record: ${req.queryParams.id}`, + description: `Create a server record: ${req.body.id}`, extra: { type: repo_doc?.type, capacity: repo_doc?.capacity, @@ -269,7 
+269,7 @@ router httpVerb: "GET", routePath: basePath + "/create", status: "Failure", - description: `Create a server record: ${req.queryParams.id}`, + description: `Create a server record: ${req.body.id}`, extra: { type: repo_doc?.type, capacity: repo_doc?.capacity, @@ -496,7 +496,6 @@ router var alloc = g_db._query("for v in 1..1 inbound @repo alloc return {id:v._id}", { repo: req.queryParams.id, }); - console.log(alloc); if (alloc.hasNext()) throw [ error.ERR_IN_USE, diff --git a/core/database/foxx/api/support.js b/core/database/foxx/api/support.js index 8f7ec612e..ea3ce0be4 100644 --- a/core/database/foxx/api/support.js +++ b/core/database/foxx/api/support.js @@ -401,8 +401,6 @@ module.exports = (function () { };*/ obj.handleException = function (e, res) { - console.log("Service exception:", e); - if (obj.isInteger(e) && e >= 0 && e < error.ERR_COUNT) { res.throw(error.ERR_INFO[e][0], error.ERR_INFO[e][1]); } else if (Array.isArray(e)) { @@ -528,8 +526,6 @@ module.exports = (function () { for (var i in potential_uuids) { uuids.push("uuid/" + potential_uuids[i]); } - console.log("resolveUUIDsToID"); - console.log("uuids: ", uuids); var result = obj.db ._query("for i in ident filter i._to in @ids return distinct document(i._from)", { ids: uuids, @@ -564,8 +560,6 @@ module.exports = (function () { for (var i in potential_uuids) { uuids.push("uuid/" + potential_uuids[i]); } - console.log("resolveUUIDsToID_noexcept"); - console.log("uuids: ", uuids); var result = obj.db ._query("for i in ident filter i._to in @ids return distinct document(i._from)", { ids: uuids, @@ -578,7 +572,6 @@ module.exports = (function () { var first_uuid = result[0]._id; // Next we need to make sure the provided ids are all the same if there is more than one for (var i = 1; i < result.length; i++) { - console.log("resolveUUID comparing " + first_uuid + " with " + result[i]); if (first_uuid != result[i]._id) { return; } @@ -624,7 +617,6 @@ module.exports = (function () { // UUID are defined 
by length and format, accounts have a "." (and known domains), SDMS unames have no "." or "-" characters var params; - console.log("getUserFromClient id: ", a_client_id); if (a_client_id.startsWith("u/")) { if (!obj.db.u.exists(a_client_id)) { @@ -649,7 +641,6 @@ module.exports = (function () { // determine the UUID, if they are not, then we will throw an error for now, var unambiguous_id = obj.resolveUUIDsToID(a_client_id); if (!unambiguous_id) { - console.log("Undefined"); return; } //params = { 'id': unambiguous_id }; @@ -706,7 +697,6 @@ module.exports = (function () { // determine the UUID, if they are not, then we will throw an error for now, var unambiguous_id = obj.resolveUUIDsToID_noexcept(a_client_id); if (!unambiguous_id) { - console.log("Undefined"); return; } //params = { 'id': unambiguous_id }; @@ -735,8 +725,6 @@ module.exports = (function () { }; obj.findUserFromUUIDs = function (a_uuids) { - console.log("findUserFromUUIDs"); - console.log("a_uuids: ", a_uuids); var result = obj.db ._query("for i in ident filter i._to in @ids return distinct document(i._from)", { ids: a_uuids, @@ -1670,7 +1658,6 @@ module.exports = (function () { }; obj.hasPublicRead = function (a_id) { - console.log("Has public read a_id is ", a_id); // Check for local topic on collections if (a_id.startsWith("c/")) { var col = obj.db.c.document(a_id); diff --git a/core/database/foxx/api/tasks.js b/core/database/foxx/api/tasks.js index 8e7ac603d..44cdc2397 100644 --- a/core/database/foxx/api/tasks.js +++ b/core/database/foxx/api/tasks.js @@ -24,8 +24,6 @@ var tasks_func = (function () { a_data_limit, a_rec_limit, ) { - console.log("taskInitAllocCreate"); - // Check if repo and subject exist if (!g_db._exists(a_repo_id)) throw [error.ERR_NOT_FOUND, "Repo, '" + a_repo_id + "', does not exist"]; @@ -106,8 +104,6 @@ var tasks_func = (function () { }; obj.taskRunAllocCreate = function (a_task) { - console.log("taskRunAllocCreate"); - var reply, state = a_task.state; @@ -156,8 +152,6 @@ 
var tasks_func = (function () { // ----------------------- ALLOC DELETE ---------------------------- obj.taskInitAllocDelete = function (a_client, a_repo_id, a_subject_id) { - console.log("taskInitAllocDelete"); - if (!g_db._exists(a_repo_id)) throw [error.ERR_NOT_FOUND, "Repo, '" + a_repo_id + "', does not exist"]; @@ -247,8 +241,6 @@ var tasks_func = (function () { }; obj.taskRunAllocDelete = function (a_task) { - console.log("taskRunAllocDelete"); - var reply, state = a_task.state; @@ -305,8 +297,6 @@ var tasks_func = (function () { is_collection_token_required = false, collection_info = {}, ) { - console.log("taskInitDataGet"); - var result = g_proc.preprocessItems(a_client, null, a_res_ids, g_lib.TT_DATA_GET); if (result.glob_data.length + result.ext_data.length > 0 && !a_check) { @@ -387,8 +377,6 @@ var tasks_func = (function () { }; obj.taskRunDataGet = function (a_task) { - console.log("taskRunDataGet"); - var reply, state = a_task.state; @@ -489,8 +477,6 @@ var tasks_func = (function () { is_collection_token_required = false, collection_info = {}, ) { - console.log("taskInitDataPut"); - var result = g_proc.preprocessItems(a_client, null, a_res_ids, g_lib.TT_DATA_PUT); if (result.glob_data.length > 0 && !a_check) { @@ -531,7 +517,6 @@ var tasks_func = (function () { }; obj.taskRunDataPut = function (a_task) { - console.log("taskRunDataPut"); var reply, state = a_task.state, params, @@ -541,7 +526,6 @@ var tasks_func = (function () { // No rollback functionality if (a_task.step < 0) return; - console.log("taskRunDataPut begin Step: ", a_task.step); if (a_task.step == 0) { //console.log("taskRunDataPut - do setup"); obj._transact( @@ -660,8 +644,6 @@ var tasks_func = (function () { ids: [xfr.files[0].id], }; - console.log("Printing params in task update size"); - console.log(params); reply = { cmd: g_lib.TC_RAW_DATA_UPDATE_SIZE, params: params, @@ -683,8 +665,6 @@ var tasks_func = (function () { ); } - console.log("taskRunDataPut final reply"); - 
console.log(reply); return reply; }; @@ -696,8 +676,6 @@ var tasks_func = (function () { involved allocations. Unmanaged records do not use allocations and are ignored. */ obj.taskInitRecAllocChg = function (a_client, a_proj_id, a_res_ids, a_dst_repo_id, a_check) { - console.log("taskInitRecAllocChg"); - // Verify that client is owner, or has admin permission to project owner var owner_id; @@ -791,8 +769,6 @@ var tasks_func = (function () { }; obj.taskRunRecAllocChg = function (a_task) { - console.log("taskRunRecAllocChg"); - var reply, state = a_task.state, params, @@ -1155,8 +1131,6 @@ var tasks_func = (function () { }; obj.taskRunRecOwnerChg = function (a_task) { - console.log("taskRunRecOwnerChg"); - var reply, state = a_task.state, params, @@ -1376,8 +1350,6 @@ var tasks_func = (function () { }; obj.taskInitRecCollDelete = function (a_client, a_ids) { - console.log("taskInitRecCollDelete start", Date.now()); - var result = g_proc.preprocessItems(a_client, null, a_ids, g_lib.TT_REC_DEL); if (result.has_pub) { @@ -1451,8 +1423,6 @@ var tasks_func = (function () { }; obj.taskRunRecCollDelete = function (a_task) { - console.log("taskRunRecCollDelete"); - var i, reply, state = a_task.state, @@ -1626,8 +1596,6 @@ var tasks_func = (function () { }; obj.taskRunProjDelete = function (a_task) { - console.log("taskRunProjDelete"); - var reply, state = a_task.state; @@ -1893,8 +1861,6 @@ var tasks_func = (function () { xfr_docs - chunked per source repo and max data transfer size */ - console.log("_buildTransferDoc", a_mode, a_remote, a_orig_fname); - var fnames, i, idx, @@ -2440,7 +2406,6 @@ var tasks_func = (function () { * contain raw data. 
*/ obj._projectDelete = function (a_proj_id) { - console.log("_projectDelete", a_proj_id); // Delete allocations g_db.alloc.removeByExample({ _from: a_proj_id, diff --git a/core/database/foxx/tests/acl_router.test.js b/core/database/foxx/tests/acl_router.test.js index 454deae50..77fddb15a 100644 --- a/core/database/foxx/tests/acl_router.test.js +++ b/core/database/foxx/tests/acl_router.test.js @@ -18,7 +18,7 @@ describe("unit_acl_router: test /update route", () => { beforeEach(() => { // Ensure necessary collections exist - const collections = ["u", "c", "d", "acl", "owner", "g"]; + const collections = ["member", "u", "c", "d", "acl", "owner", "g"]; collections.forEach((name) => { let col = db._collection(name); if (col) col.truncate(); diff --git a/core/database/foxx/tests/authz_router.test.js b/core/database/foxx/tests/authz_router.test.js index ed074c053..20984d405 100644 --- a/core/database/foxx/tests/authz_router.test.js +++ b/core/database/foxx/tests/authz_router.test.js @@ -218,6 +218,28 @@ describe("unit_authz_router: the Foxx microservice authz_router", () => { g_db.repo.truncate(); }); + after(() => { + [ + "u", + "ident", + "uuid", + "acl", + "item", + "c", + "g", + "p", + "owner", + "member", + "d", + "alloc", + "loc", + "repo", + ].forEach((name) => { + const col = g_db._collection(name); + if (col) col.truncate(); + }); + }); + it("unit_authz_router: gridftp create action with user record and valid file path.", () => { defaultWorkingSetup(); const request_string = diff --git a/core/database/foxx/tests/coll_router.test.js b/core/database/foxx/tests/coll_router.test.js index df85b1133..0b6b94c0b 100644 --- a/core/database/foxx/tests/coll_router.test.js +++ b/core/database/foxx/tests/coll_router.test.js @@ -94,6 +94,7 @@ describe("unit_coll_router: /col/create endpoint", () => { "uuid", "accn", "u", + "d", ]; collections.forEach((name) => { let col = db._collection(name); diff --git a/core/database/foxx/tests/query_router.test.js 
b/core/database/foxx/tests/query_router.test.js index 97ad2b52b..8c982edb7 100644 --- a/core/database/foxx/tests/query_router.test.js +++ b/core/database/foxx/tests/query_router.test.js @@ -157,7 +157,6 @@ describe("unit_query_router: the Foxx microservice qry_router endpoints", () => }); var parsed = JSON.parse(response.body); - console.log("Response body:", response.body); // assert expect(response.status).to.equal(200); expect(parsed).to.be.an("array"); diff --git a/core/database/foxx/tests/repo_router.test.js b/core/database/foxx/tests/repo_router.test.js index f7689ab3a..fd751f723 100644 --- a/core/database/foxx/tests/repo_router.test.js +++ b/core/database/foxx/tests/repo_router.test.js @@ -11,7 +11,7 @@ const repo_base_url = `${baseUrl}/repo`; // NOTE: describe block strings are compared against test specification during test call, not file name describe("integration_repo_router: the Foxx microservice repo_router create endpoint", () => { beforeEach(() => { - const collections = ["repo", "d", "alloc", "loc", "repo", "admin", "g", "p", "u"]; + const collections = ["repo", "d", "alloc", "loc", "admin", "g", "p", "u"]; collections.forEach((name) => { let col = g_db._collection(name); if (col) { @@ -22,6 +22,13 @@ describe("integration_repo_router: the Foxx microservice repo_router create endp }); }); + after(function () { + const collections = ["repo", "d", "alloc", "loc", "admin", "g", "p", "u"]; + collections.forEach((name) => { + const col = g_db._collection(name); + if (col) col.truncate(); + }); + }); const user_params = { id: "u/shredder", key: "shredder", diff --git a/core/database/foxx/tests/schema_router.test.js b/core/database/foxx/tests/schema_router.test.js index 94f36d7e3..e296650f2 100644 --- a/core/database/foxx/tests/schema_router.test.js +++ b/core/database/foxx/tests/schema_router.test.js @@ -24,6 +24,14 @@ describe("schema router", () => { }); }); + after(function () { + const collections = ["u", "sch", "sch_dep"]; + 
collections.forEach((name) => { + const col = db._collection(name); + if (col) col.truncate(); + }); + }); + it("unit_schema_router: should create a schema", () => { const body = { id: "test_schema_1", diff --git a/core/database/foxx/tests/task_router.test.js b/core/database/foxx/tests/task_router.test.js index 8815e09a0..297044935 100644 --- a/core/database/foxx/tests/task_router.test.js +++ b/core/database/foxx/tests/task_router.test.js @@ -33,6 +33,13 @@ describe("unit_task_router: the Foxx microservice task_router list/ endpoint", ( }); }); + after(function () { + const collections = ["u", "task"]; + collections.forEach((name) => { + const col = db._collection(name); + if (col) col.truncate(); + }); + }); it("should successfully run the list route", () => { db.u.save({ _key: "fakeUser", From fd30747efcd152430580f3c3aa39e8a59210a29c Mon Sep 17 00:00:00 2001 From: Austin Hampton <44103380+megatnt1122@users.noreply.github.com> Date: Wed, 4 Feb 2026 11:38:30 -0500 Subject: [PATCH 53/65] [DAPS-1522] - refactor: data router logging improvements (#1789) --- core/database/CMakeLists.txt | 4 +- core/database/foxx/api/data_router.js | 547 +++++++++++- core/database/foxx/api/support.js | 1 - core/database/foxx/tests/data_router.test.js | 835 +++++++++++++++++++ 4 files changed, 1370 insertions(+), 17 deletions(-) create mode 100644 core/database/foxx/tests/data_router.test.js diff --git a/core/database/CMakeLists.txt b/core/database/CMakeLists.txt index f1739bd1b..0a7f3026a 100644 --- a/core/database/CMakeLists.txt +++ b/core/database/CMakeLists.txt @@ -23,6 +23,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_base_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_base_repository:") add_test(NAME foxx_repositories COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "integration_repositories:") add_test(NAME foxx_repo_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "integration_repo_router:") + add_test(NAME foxx_admin_router 
COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") add_test(NAME foxx_validation_repo COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_validation_repository:") add_test(NAME foxx_path COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_path:") add_test(NAME foxx_version COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_version:") @@ -32,7 +33,7 @@ if( ENABLE_FOXX_TESTS ) add_test(NAME foxx_proj_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_proj_router:") add_test(NAME foxx_schema_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_schema_router:") add_test(NAME foxx_acl_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_acl_router:") - add_test(NAME foxx_admin_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_admin_router:") + add_test(NAME foxx_data_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_data_router:") add_test(NAME foxx_config_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_config_router:") add_test(NAME foxx_group_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_group_router:") add_test(NAME foxx_note_router COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/tests/test_foxx.sh" -t "unit_note_router:") @@ -56,6 +57,7 @@ if( ENABLE_FOXX_TESTS ) set_tests_properties(foxx_authz PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_authz_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_record PROPERTIES FIXTURES_REQUIRED Foxx) + set_tests_properties(foxx_data_router PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_repo PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_repo_globus PROPERTIES FIXTURES_REQUIRED Foxx) set_tests_properties(foxx_repo_metadata PROPERTIES FIXTURES_REQUIRED Foxx) diff --git a/core/database/foxx/api/data_router.js b/core/database/foxx/api/data_router.js index 
c72f20b05..078448e5a 100644 --- a/core/database/foxx/api/data_router.js +++ b/core/database/foxx/api/data_router.js @@ -10,6 +10,8 @@ const permissions = require("./lib/permissions"); const g_proc = require("./process"); const g_tasks = require("./tasks"); const { UserToken } = require("./lib/user_token"); +const logger = require("./lib/logger"); +const basePath = "dat"; module.exports = router; @@ -101,7 +103,6 @@ function recordCreate(client, record, result) { obj.ext_auto = true; } } - if (record.md) { obj.md = JSON.parse(record.md); // parse escaped JSON string TODO: this could be dangerous if (Array.isArray(obj.md)) throw [error.ERR_INVALID_PARAM, "Metadata cannot be an array"]; @@ -256,10 +257,20 @@ function recordCreate(client, record, result) { router .post("/create", function (req, res) { var retry = 10; - + let client = null; + let result = null; for (;;) { try { - var result = { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: "Create a new data record", + }); + result = { results: [], }; @@ -288,8 +299,34 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Success", + description: "Create a new data record", + extra: { + id: result?.results[0]?.id, + count: result?.results.length, + }, + }); + break; } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Failure", + description: "Create a new data record", + extra: { + id: result?.results[0]?.id, + count: result?.results.length, + }, + error: e, + }); if (--retry == 0 || !e.errorNum || e.errorNum != 
1200) { g_lib.handleException(e, res); } @@ -331,10 +368,20 @@ router router .post("/create/batch", function (req, res) { var retry = 10; - + let client = null; + let result = null; for (;;) { try { - var result = { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create/batch", + status: "Started", + description: "Create a batch of new data records", + }); + + result = { results: [], }; @@ -367,8 +414,35 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create/batch", + status: "Success", + description: "Create a batch of new data records", + extra: { + latestId: result?.results[result?.results.length - 1]?.id, + count: result?.results.length, + }, + }); + break; } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create/batch", + status: "Failure", + description: "Create a batch of new data records", + extra: { + latestId: result?.results[result?.results.length - 1]?.id, + count: result?.results.length, + }, + + error: e, + }); if (--retry == 0 || !e.errorNum || e.errorNum != 1200) { g_lib.handleException(e, res); } @@ -420,7 +494,6 @@ router function recordUpdate(client, record, result) { // /console.log("recordUpdate:",record); - var data_id = g_lib.resolveDataID(record.id, client); var data = g_db.d.document(data_id); @@ -442,8 +515,7 @@ function recordUpdate(client, record, result) { perms |= permissions.PERM_WR_REC; } - if (data.locked || !permissions.hasPermissions(client, data, perms)) - throw error.ERR_PERM_DENIED; + if (data.locked || !g_lib.hasPermissions(client, data, perms)) throw error.ERR_PERM_DENIED; } var owner_id = g_db.owner.firstExample({ @@ -762,13 +834,23 @@ function 
recordUpdate(client, record, result) { router .post("/update", function (req, res) { + let client = null; + let result = null; try { - var result = { + client = g_lib.getUserFromClientID(req.queryParams.client); + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: `Update an existing data record. RecordID: ${req.body.id}`, + }); + + result = { results: [], updates: new Set(), }; - const client = g_lib.getUserFromClientID(req.queryParams.client); - g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "loc"], @@ -794,6 +876,7 @@ router var doc, updates = []; + result.updates.forEach(function (id) { if (id == req.body.id) { // Updated record is already in results - just copy it @@ -802,6 +885,7 @@ router doc = g_db._document(id); doc.notes = g_lib.getNoteMask(client, doc); } + delete doc.desc; //delete doc.md; updates.push(doc); @@ -809,7 +893,28 @@ router result.updates = updates; res.send(result); + + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Success", + description: `Update an existing data record. RecordID: ${req.body.id}`, + extra: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Failure", + description: `Update an existing data record. RecordID: ${req.body.id}`, + extra: "N/A", + error: e, + }); + g_lib.handleException(e, res); } }) @@ -858,8 +963,22 @@ router router .post("/update/batch", function (req, res) { + let result = null; + const ids = Array.isArray(req.body) ? req.body.map((r) => r.id) : []; + + let totalCount = ids.length; + const displayedIds = totalCount > 5 ? 
`${ids.slice(0, 5).join(",")}...` : ids.join(","); try { - var result = { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/batch", + status: "Started", + description: `Update a batch of existing data record. RecordIDs: ${displayedIds}`, + }); + + result = { results: [], updates: new Set(), }; @@ -912,7 +1031,31 @@ router result.updates = updates; res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/batch", + status: "Success", + description: `Update a batch of existing data record. RecordIDs: ${displayedIds}`, + extra: { + count: totalCount, + }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/batch", + status: "Failure", + description: `Update a batch of existing data record. RecordIDs: ${displayedIds}`, + extra: { + count: totalCount, + }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -970,6 +1113,15 @@ router router .post("/update/md_err_msg", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/md_err_msg", + status: "Started", + description: `Update data record schema validation error message. RecordID: ${req.queryParams.id}`, + }); + g_db._executeTransaction({ collections: { write: ["d"], @@ -998,8 +1150,27 @@ router ); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/md_err_msg", + status: "Success", + description: `Update data record schema validation error message. 
RecordID: ${req.queryParams.id}`, + extra: "N/A", + }); } catch (e) { g_lib.handleException(e, res); + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/md_err_msg", + status: "Failure", + description: `Update data record schema validation error message. RecordID: ${req.queryParams.id}`, + extra: "N/A", + error: e, + }); } }) .queryParam("client", joi.string().optional(), "Client ID") @@ -1013,11 +1184,32 @@ router router .post("/update/size", function (req, res) { var retry = 10; + let result = null; + const records = Array.isArray(req.body?.records) ? req.body.records : []; + + const total = records.length; + + const summary = records + .slice(0, 3) + .map((r) => `${r.id}:${r.size}`) + .join(", "); + + const recordSummary = + total > 3 ? `${summary}... (${total} total)` : `${summary} (${total} total)`; // Must do this in a retry loop in case of concurrent (non-put) updates for (;;) { try { - var result = []; + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/size", + status: "Started", + description: `Update existing data record size. Summary: ${recordSummary}`, + }); + + result = []; g_db._executeTransaction({ collections: { @@ -1066,8 +1258,27 @@ router }); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/size", + status: "Success", + description: `Update existing data record size. Summary: ${recordSummary}`, + extra: "N/A", + }); + break; } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update/size", + status: "Failure", + description: `Update existing data record size. 
Summary: ${recordSummary}`, + extra: "N/A", + }); if (--retry == 0 || !e.errorNum || e.errorNum != 1200) { g_lib.handleException(e, res); } @@ -1097,6 +1308,15 @@ router router .get("/view", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Started", + description: `Get data by ID or alias. ID: ${req.queryParams.id}`, + }); + const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); var data_id = g_lib.resolveDataID(req.queryParams.id, client); @@ -1166,7 +1386,26 @@ router res.send({ results: [data], }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Success", + description: `Get data by ID or alias. ID: ${req.queryParams.id}`, + results: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/view", + status: "Failure", + description: `Get data by ID or alias. ID: ${req.queryParams.id}`, + extra: "N/A", + }); + g_lib.handleException(e, res); } }) @@ -1177,7 +1416,20 @@ router router .post("/export", function (req, res) { + const ids = req.body.id || []; + const preview = ids.slice(0, 3).join(", "); + const idSummary = ids.length > 3 ? `${preview}, ...` : preview; + const idCount = ids.length; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/export", + status: "Started", + description: `Export record data. 
ID: ${idSummary}`, + }); + g_db._executeTransaction({ collections: { read: ["uuid", "accn", "d", "c", "item"], @@ -1224,7 +1476,28 @@ router res.send(results); }, }); + + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/export", + status: "Success", + description: `Export record data. ID: ${idSummary}`, + extra: { count: idCount }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/export", + status: "Failure", + description: `Export record data. ID: ${idSummary}`, + extra: { count: idCount }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -1242,7 +1515,17 @@ router router .get("/dep/graph/get", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/dep/graph/get", + status: "Started", + description: `Get data dependency graph. ID: ${req.queryParams.id}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var data_id = g_lib.resolveDataID(req.queryParams.id, client); var i, @@ -1255,10 +1538,10 @@ router visited = [data_id], cur = [[data_id, true]], next = [], - result = [], notes, gen = 0; + result = []; // Get Ancestors //console.log("get ancestors"); @@ -1412,7 +1695,27 @@ router } res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/dep/graph/get", + status: "Success", + description: `Get data dependency graph. 
ID: ${req.queryParams.id}`, + extra: { count: result.length }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/dep/graph/get", + status: "Failure", + description: `Get data dependency graph. ID: ${req.queryParams.id}`, + extra: { count: result.length }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -1423,7 +1726,20 @@ router router .get("/lock", function (req, res) { + const ids = req.queryParams.ids || []; + const preview = ids.slice(0, 3).join(", "); + const idSummary = ids.length > 3 ? `${preview}, ...` : preview; + const idCount = ids.length; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/lock", + status: "Started", + description: `Toggle data record lock. IDs: ${idSummary}, Lock: ${req.queryParams.lock}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); g_db._executeTransaction({ collections: { @@ -1461,7 +1777,26 @@ router res.send(result); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/lock", + status: "Success", + description: `Toggle data record lock. IDs: ${idSummary}, Lock: ${req.queryParams.lock}`, + extra: { count: idCount }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/lock", + status: "Failure", + description: `Toggle data record lock. 
IDs: ${idSummary}, Lock: ${req.queryParams.lock}`, + extra: { count: idCount }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1491,6 +1826,15 @@ router router .get("/path", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/path", + status: "Started", + description: `Get raw data local path. IDs: ${req.queryParams.id}; Domain: ${req.queryParams.domain}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var data_id = g_lib.resolveDataID(req.queryParams.id, client); @@ -1517,7 +1861,26 @@ router path: path, }); //res.send({ path: repo.exp_path + loc.path.substr( repo.path.length ) }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/path", + status: "Success", + description: `Get raw data local path. IDs: ${req.queryParams.id}; Domain: ${req.queryParams.domain}`, + extra: { path: path }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/path", + status: "Failure", + description: `Get raw data local path. IDs: ${req.queryParams.id}; Domain: ${req.queryParams.domain}`, + extra: { path: path }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1529,7 +1892,17 @@ router router .get("/list/by_alloc", function (req, res) { + let result = null; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_alloc", + status: "Started", + description: `List data records by allocation. 
repo: ${req.queryParams.repo}`, + }); + const client = g_lib.getUserFromClientID(req.queryParams.client); var owner_id; @@ -1545,7 +1918,6 @@ router } var qry = "for v,e in 1..1 inbound @repo loc filter e.uid == @uid sort v.title", - result, doc; if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { @@ -1579,6 +1951,7 @@ router repo: req.queryParams.repo, uid: owner_id, }); + result = result.toArray(); } for (var i in result) { @@ -1589,7 +1962,27 @@ router } res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_alloc", + status: "Success", + description: `List data records by allocation. repo: ${req.queryParams.repo}`, + extra: { count: req.queryParams?.count }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "GET", + routePath: basePath + "/list/by_alloc", + status: "Failure", + description: `List data records by allocation. repo: ${req.queryParams.repo}`, + extra: { count: req.queryParams?.count }, + error: e, + }); + g_lib.handleException(e, res); } }) @@ -1604,6 +1997,15 @@ router router .post("/get", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/get", + status: "Started", + description: `Get (download) data to Globus destination path. 
ID: ${req.body.id}`, + }); + g_db._executeTransaction({ collections: { read: ["uuid", "accn", "d", "c", "item"], @@ -1658,9 +2060,29 @@ router g_lib.saveRecentGlobusPath(client, req.body.path, g_lib.TT_DATA_GET); res.send(result); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/get", + status: "Success", + description: `Get (download) data to Globus destination path. ID: ${req.body.id}`, + extra: "N/A", + }); }, }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/get", + status: "Failure", + description: `Get (download) data to Globus destination path. ID: ${req.body.id}`, + extra: "N/A", + error: e, + }); + g_lib.handleException(e, res); } }) @@ -1687,6 +2109,15 @@ router router .post("/put", function (req, res) { try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/put", + status: "Started", + description: `Put (upload) raw data to record. ID: ${req.body.id}`, + }); + g_db._executeTransaction({ collections: { read: ["uuid", "accn", "d", "c", "item"], @@ -1747,7 +2178,27 @@ router res.send(result); }, }); + + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/put", + status: "Success", + description: `Put (upload) raw data to record. ID: ${req.body.id}`, + extra: "N/A", + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/put", + status: "Failure", + description: `Put (upload) raw data to record. 
ID: ${req.body.id}`, + extra: "N/A", + error: e, + }); g_lib.handleException(e, res); } }) @@ -1773,7 +2224,20 @@ router router .post("/alloc_chg", function (req, res) { + const ids = req.body.ids || []; + const id_count = ids.length; + + const ids_preview = id_count > 3 ? ids.slice(0, 3).concat("...") : ids; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/alloc_chg", + status: "Started", + description: `Move raw data to new allocation. Repo ID: ${req.body.repo_id}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "d", "c", "item"], @@ -1800,7 +2264,26 @@ router res.send(result); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/alloc_chg", + status: "Success", + description: `Move raw data to new allocation. Repo ID: ${req.body.repo_id}`, + extra: { IDs: ids_preview, count: id_count }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/alloc_chg", + status: "Failure", + description: `Move raw data to new allocation. Repo ID: ${req.body.repo_id}`, + extra: { IDs: ids_preview, count: id_count }, + error: e, + }); g_lib.handleException(e, res); } }) @@ -1821,7 +2304,20 @@ router router .post("/owner_chg", function (req, res) { + const ids = Array.isArray(req.body.ids) ? req.body.ids : []; + const id_count = ids.length; + + const id_preview = id_count > 3 ? ids.slice(0, 3).concat("...") : ids; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/owner_chg", + status: "Started", + description: `Move data records and raw data to a new owner/allocation. 
Coll ID: ${req.body.coll_id}`, + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "d", "c", "item", "admin"], @@ -1836,6 +2332,7 @@ router id = g_lib.resolveDataCollID(req.body.ids[i], client); res_ids.push(id); } + var coll_id = g_lib.resolveDataCollID(req.body.coll_id, client); var result = g_tasks.taskInitRecOwnerChg( client, @@ -1848,7 +2345,27 @@ router res.send(result); }, }); + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/owner_chg", + status: "Success", + description: `Move data records and raw data to a new owner/allocation. Coll ID: ${req.body.coll_id}`, + extra: { IDs: id_preview, count: id_count }, + }); } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/owner_chg", + status: "Failure", + description: `Move data records and raw data to a new owner/allocation. 
Coll ID: ${req.body.coll_id}`, + extra: { IDs: id_preview, count: id_count }, + error: e, + }); + g_lib.handleException(e, res); } }) diff --git a/core/database/foxx/api/support.js b/core/database/foxx/api/support.js index ea3ce0be4..a85eed0af 100644 --- a/core/database/foxx/api/support.js +++ b/core/database/foxx/api/support.js @@ -1109,7 +1109,6 @@ module.exports = (function () { obj.getCollCategoryTags = function (a_coll_id) { var coll = obj.db.c.document(a_coll_id), ctx = obj.catalogCalcParCtxt(coll, {}); - if (ctx.pub) return Array.from(ctx.tags); }; diff --git a/core/database/foxx/tests/data_router.test.js b/core/database/foxx/tests/data_router.test.js new file mode 100644 index 000000000..72879cc16 --- /dev/null +++ b/core/database/foxx/tests/data_router.test.js @@ -0,0 +1,835 @@ +"use strict"; + +const { expect } = require("chai"); +const request = require("@arangodb/request"); +const { db } = require("@arangodb"); +const { baseUrl } = module.context; + +const data_base_url = `${baseUrl}/dat`; + +after(function () { + // clean up all collections used in the test + const collections = ["u", "d", "c", "repo", "alloc", "loc", "owner", "alias", "item", "dep"]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) col.truncate(); + }); +}); + +describe("unit_data_router: the Foxx microservice data_router create/ endpoint", () => { + beforeEach(() => { + const collections = [ + "u", + "d", + "c", + "repo", + "alloc", + "loc", + "owner", + "alias", + "item", + "dep", + ]; + collections.forEach((name) => { + let col = db._collection(name); + if (col) + col.truncate(); // truncate if exists + else db._create(name); // create if missing + }); + + // Create the fake user + db.u.save({ + _key: "fakeUser", + _id: "u/fakeUser", + name: "fake user", + name_first: "fake", + name_last: "user", + is_admin: true, + max_coll: 50, + max_proj: 10, + max_sav_qry: 20, + email: "fakeuser@gmail.com", + }); + + // Create a fake repo + db.repo.save({ + _key: 
"fakeRepo", + path: "/tmp/fakeRepo", + }); + + // Create allocation for fakeUser -> fakeRepo + db.alloc.save({ + _from: "u/fakeUser", // user _id + _to: "repo/fakeRepo", // repo _id + rec_count: 0, // current record count + rec_limit: 10, // max number of records allowed + data_size: 0, // current data size + data_limit: 100000000, // max data size + state: "active", + }); + + db.owner.save({ + _from: "c/root_fakeUser", + _to: "u/fakeUser", + }); + // Create a root collection for fakeUser + if (!db._collection("c")) db._create("c"); + db.c.save({ + _key: "u_fakeUser_root", + }); + }); + it("should create exactly one data record", () => { + const res = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "My First Data", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + expect(body).to.have.property("results"); + expect(body.results).to.be.an("array").with.lengthOf(1); + }); + + it("should create multiple data records in a batch", () => { + const records = [ + { title: "First Batch Record" }, + { title: "Second Batch Record" }, + { title: "Third Batch Record" }, + ]; + + const res = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: records, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + // parse body in case it's returned as a string + const body = typeof res.body === "string" ? 
JSON.parse(res.body) : res.body; + + expect(body).to.have.property("results"); + expect(body.results).to.be.an("array").with.lengthOf(records.length); + + // verify each record was created with the correct title and an ID + records.forEach((r, i) => { + expect(body.results[i]).to.have.property("title", r.title); + expect(body.results[i]).to.have.property("id").that.is.a("string"); + }); + }); + + it("should update an existing data record", () => { + //CREATING NEW RECORD + let res = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Title Original", + }, + json: true, + }); + const body = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + const recordId = body.results[0].id; + + //UPDATING EXISTING RECORD + res = request.post(`${data_base_url}/update?client=fakeUser`, { + body: { + id: recordId, + title: "New Title", + }, + json: true, + }); + expect(res.statusCode).to.equal(200); + const updateBody = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + // ASSERTION: verify the title actually changed + expect(updateBody.results[0].title).to.equal("New Title"); + }); + + it("should update multiple data records in a batch", () => { + // Step 1: create two records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [{ title: "Batch Original 1" }, { title: "Batch Original 2" }], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + + // Step 2: update both records + const updateRes = request.post(`${data_base_url}/update/batch?client=fakeUser`, { + body: [ + { id: ids[0], title: "Batch Updated 1" }, + { id: ids[1], title: "Batch Updated 2" }, + ], + json: true, + }); + + expect(updateRes.statusCode).to.equal(200); + + const updateBody = + typeof updateRes.body === "string" ? 
JSON.parse(updateRes.body) : updateRes.body; + + // Step 3: assertions + expect(updateBody).to.have.property("updates"); + expect(updateBody.updates).to.be.an("array").with.lengthOf(2); + + const titles = updateBody.updates.map((r) => r.title); + expect(titles).to.include("Batch Updated 1"); + expect(titles).to.include("Batch Updated 2"); + }); + + it("should update the metadata schema validation error message on a data record", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Record With Schema Error", + }, + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const recordId = createBody.results[0].id; + + // Step 2: update md_err_msg via plain-text body + const errorMessage = "Schema validation failed: missing required field"; + + const updateRes = request.post( + `${data_base_url}/update/md_err_msg?id=${recordId}&client=fakeUser`, + { + body: errorMessage, + headers: { + "content-type": "text/plain", + }, + }, + ); + + expect(updateRes.statusCode).to.equal(204); + + // Step 3: verify DB was updated + const doc = db._document(recordId); + + expect(doc).to.have.property("md_err", true); + expect(doc).to.have.property("md_err_msg", errorMessage); + }); + + it("should update the size of an existing data record and update allocation usage", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Data Record With Size", + }, + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + + const recordId = createBody.results[0].id; + + // Step 2: create required loc edge (owner & alloc already exist from beforeEach) + db.loc.save({ + _from: recordId, + _to: "repo/fakeRepo", + }); + + // Step 3: verify initial values + let dataDoc = db._document(recordId); + expect(dataDoc.size || 0).to.equal(0); + + let allocDoc = db.alloc.firstExample({ + _from: "u/fakeUser", + _to: "repo/fakeRepo", + }); + expect(allocDoc.data_size).to.equal(0); + + // Step 4: call update/size + const newSize = 4096; + + const updateRes = request.post(`${data_base_url}/update/size?client=fakeUser`, { + body: { + records: [ + { + id: recordId, + size: newSize, + }, + ], + }, + json: true, + }); + + expect(updateRes.statusCode).to.equal(200); + + // Step 5: verify data record updated + dataDoc = db._document(recordId); + expect(dataDoc.size).to.equal(newSize); + expect(dataDoc).to.have.property("ut"); + expect(dataDoc).to.have.property("dt"); + + // Step 6: verify allocation updated + allocDoc = db.alloc.firstExample({ + _from: "u/fakeUser", + _to: "repo/fakeRepo", + }); + expect(allocDoc.data_size).to.equal(newSize); + }); + + it("should retrieve a data record by ID via /view", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { title: "Record To View" }, + json: true, + }); + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + const recordId = createBody.results[0].id; + + // Step 2: optionally create loc edge (if your /view code expects it) + db.loc.save({ + _from: recordId, + _to: "repo/fakeRepo", + }); + + // Step 3: call the /view endpoint + const viewRes = request.get(`${data_base_url}/view`, { + qs: { + client: "fakeUser", + id: recordId, + }, + headers: { + "x-correlation-id": "test-corr-001", + }, + json: true, + }); + + expect(viewRes.statusCode).to.equal(200); + + const viewBody = typeof viewRes.body === "string" ? JSON.parse(viewRes.body) : viewRes.body; + expect(viewBody).to.have.property("results").that.is.an("array").with.lengthOf(1); + + const data = viewBody.results[0]; + expect(data).to.have.property("id", recordId); + expect(data).to.have.property("title", "Record To View"); + expect(data).to.not.have.property("_id"); + expect(data).to.not.have.property("_key"); + expect(data).to.not.have.property("_rev"); + }); + it("should export one or more data records as JSON strings via /export", () => { + // Step 1: create two data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [{ title: "Export Record 1" }, { title: "Export Record 2" }], + json: true, + }); + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + const recordIds = createBody.results.map((r) => r.id); + + // Step 2: optionally create loc edges if required by export + recordIds.forEach((id) => { + db.loc.save({ + _from: id, + _to: "repo/fakeRepo", + }); + }); + + // Step 3: call the /export endpoint + const exportRes = request.post(`${data_base_url}/export?client=fakeUser`, { + body: { + id: recordIds, + }, + headers: { + "x-correlation-id": "test-corr-002", + }, + json: true, + }); + + expect(exportRes.statusCode).to.equal(200); + + // Step 4: parse and validate exported data + const exportBody = + typeof exportRes.body === "string" ? JSON.parse(exportRes.body) : exportRes.body; + + expect(exportBody).to.be.an("array").with.lengthOf(recordIds.length); + + exportBody.forEach((jsonStr, idx) => { + const data = JSON.parse(jsonStr); + expect(data).to.have.property("id", recordIds[idx]); + expect(data).to.have.property("title").that.includes("Export Record"); + expect(data).to.not.have.property("_id"); + expect(data).to.not.have.property("_key"); + expect(data).to.not.have.property("_rev"); + + // Optional: verify deps array exists + if (data.deps) expect(data.deps).to.be.an("array"); + }); + }); + it("should return a dependency graph for a data record", () => { + // Step 1: create three data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [ + { title: "Root Record" }, + { title: "Child Record" }, + { title: "Grandchild Record" }, + ], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + + const [rootId, childId, grandchildId] = createBody.results.map((r) => r.id); + + // Step 2: create dependency edges + // root -> child -> grandchild + db.dep.save({ + _from: rootId, + _to: childId, + type: 1, + }); + + db.dep.save({ + _from: childId, + _to: grandchildId, + type: 1, + }); + + // Step 3: call dep graph endpoint + const res = request.get( + `${data_base_url}/dep/graph/get?client=fakeUser&id=${encodeURIComponent(rootId)}`, + ); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + // Step 4: assertions + expect(body).to.be.an("array"); + expect(body.length).to.be.greaterThan(0); + + const ids = body.map((n) => n.id); + + expect(ids).to.include(rootId); + expect(ids).to.include(childId); + expect(ids).to.include(grandchildId); + + // Root node should have deps + const rootNode = body.find((n) => n.id === rootId); + expect(rootNode).to.have.property("deps"); + expect(rootNode.deps).to.be.an("array"); + + // Child node should reference root or grandchild + const childNode = body.find((n) => n.id === childId); + expect(childNode).to.have.property("id", childId); + + // Grandchild node should exist + const grandchildNode = body.find((n) => n.id === grandchildId); + expect(grandchildNode).to.have.property("id", grandchildId); + }); + it("should lock multiple data records", () => { + // Step 1: create multiple data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [ + { title: "Lock Test 1" }, + { title: "Lock Test 2" }, + { title: "Lock Test 3" }, + { title: "Lock Test 4" }, + ], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + + // sanity check + expect(ids).to.have.lengthOf(4); + + // Step 2: lock the records + const lockRes = request.get( + `${data_base_url}/lock?client=fakeUser&lock=true&ids=${ids.join("&ids=")}`, + ); + + expect(lockRes.statusCode).to.equal(200); + + const lockBody = typeof lockRes.body === "string" ? JSON.parse(lockRes.body) : lockRes.body; + + // Step 3: verify response + expect(lockBody).to.be.an("array").with.lengthOf(ids.length); + + lockBody.forEach((r) => { + expect(r).to.have.property("id"); + expect(r).to.have.property("locked", true); + }); + + // Step 4: verify DB state + ids.forEach((id) => { + const doc = db._document(id); + expect(doc.locked).to.equal(true); + }); + }); + it("should return the raw data local path for a data record", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Path Test Record", + }, + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const recordId = createBody.results[0].id; + + // Step 2: ensure repo has a domain + const repo = db.repo.document("repo/fakeRepo"); + db.repo.update(repo._id, { + domain: "local", + path: "/tmp/fakeRepo", + exp_path: "/tmp/fakeRepo", + }); + + // Step 3: create loc edge for the record + db.loc.save({ + _from: recordId, + _to: "repo/fakeRepo", + path: "/tmp/fakeRepo/data/file.bin", + }); + + // Step 4: call /path + const res = request.get( + `${data_base_url}/path?client=fakeUser&id=${recordId}&domain=local`, + ); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? 
JSON.parse(res.body) : res.body; + + // Step 5: assertions + expect(body).to.have.property("path"); + expect(body.path).to.be.a("string"); + expect(body.path.length).to.be.greaterThan(0); + }); + it("should list data records by allocation for a repo", () => { + // Step 1: create two data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [{ title: "Alloc Record One" }, { title: "Alloc Record Two" }], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + expect(ids).to.have.lengthOf(2); + + // Step 2: ensure repo exists (already created in beforeEach) + const repoId = "repo/fakeRepo"; + + // Step 3: create loc edges linking records to repo + ids.forEach((id) => { + db.loc.save({ + _from: id, + _to: repoId, + uid: "u/fakeUser", // required by query filter e.uid == @uid + }); + }); + + // Step 4: call list/by_alloc + const res = request.get(`${data_base_url}/list/by_alloc?client=fakeUser&repo=${repoId}`); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? 
JSON.parse(res.body) : res.body; + + // Step 5: assertions + + expect(body).to.be.an("array"); + const uniqueIds = new Set(body.map((r) => r.id)); + expect(uniqueIds.size).to.equal(2); + + const titles = body.map((r) => r.title); + expect(titles).to.include("Alloc Record One"); + expect(titles).to.include("Alloc Record Two"); + + body.forEach((rec) => { + expect(rec).to.have.property("id"); + expect(rec).to.have.property("owner"); + expect(rec).to.have.property("creator"); + }); + }); + + it("should initialize a data get task in check-only mode", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Get IT Test Record", + }, + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const recordId = createBody.results[0].id; + + // Step 2: create required loc edge (used by get path resolution) + db.loc.save({ + _from: recordId, + _to: "repo/fakeRepo", + path: "/tmp/fakeRepo/data/file.bin", + }); + + // Step 3: call /get in check-only mode + const res = request.post(`${data_base_url}/get?client=fakeUser`, { + body: { + id: [recordId], + check: true, // avoids real Globus transfer + }, + headers: { + "x-correlation-id": "test-corr-get-it-001", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + // Step 4: assertions + expect(body).to.be.an("object"); + }); + + it("should initialize a data put task in check-only mode", () => { + // Step 1: create a data record + const createRes = request.post(`${data_base_url}/create?client=fakeUser`, { + body: { + title: "Put Test Record", + }, + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? 
JSON.parse(createRes.body) : createRes.body; + + const recordId = createBody.results[0].id; + + // Step 2: call /put in check-only mode + const res = request.post(`${data_base_url}/put?client=fakeUser`, { + body: { + id: [recordId], + check: true, // avoid real Globus upload + ext: ".bin", + }, + headers: { + "x-correlation-id": "test-corr-put-it-001", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? JSON.parse(res.body) : res.body; + + // Step 3: IT-safe assertions + expect(body).to.be.an("object"); + }); + + it("should initialize an allocation change task for data records", () => { + // Step 1: create two data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [{ title: "AllocChg Record 1" }, { title: "AllocChg Record 2" }], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + expect(ids).to.have.lengthOf(2); + + // Step 2: create loc edges pointing to the original repo + ids.forEach((id) => { + db.loc.save({ + _from: id, + _to: "repo/fakeRepo", + uid: "u/fakeUser", + }); + }); + + // Step 3: create a new repo + db.repo.save({ + _key: "newRepo", + path: "/tmp/newRepo", + }); + + // Step 4: create allocation for fakeUser -> newRepo + db.alloc.save({ + _from: "u/fakeUser", + _to: "repo/newRepo", + rec_count: 0, + rec_limit: 10, + data_size: 0, + data_limit: 100000000, + state: "active", + }); + + // Step 5: call alloc_chg in check-only mode + const res = request.post(`${data_base_url}/alloc_chg?client=fakeUser`, { + body: { + ids, + repo_id: "repo/newRepo", + check: true, // IMPORTANT: do not actually move data + }, + headers: { + "x-correlation-id": "test-corr-alloc-chg-001", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === 
"string" ? JSON.parse(res.body) : res.body; + + // Step 6: assertions + expect(body).to.be.an("object"); + const locEdges = db.loc.byExample({ _to: "repo/newRepo" }).toArray(); + expect(locEdges.length).to.equal(0); + }); + it("should initialize an owner change task for data records", () => { + db.c.save({ + _key: "u_fakeUser_newOwner", + }); + + const newCollId = "c/u_fakeUser_newOwner"; + + // 🔑 REQUIRED: owner edge for destination collection + db.owner.save({ + _from: newCollId, + _to: "u/fakeUser", + }); + // Step 1: create multiple data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [ + { title: "OwnerChg Record 1" }, + { title: "OwnerChg Record 2" }, + { title: "OwnerChg Record 3" }, + { title: "OwnerChg Record 4" }, + ], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + expect(ids).to.have.lengthOf(4); + + // Step 2: ensure original owner collection exists + const originalCollId = "c/u_fakeUser_root"; + + // Step 3: create owner edges for each record (REQUIRED) + ids.forEach((id) => { + db.owner.save({ + _from: id, + _to: originalCollId, + }); + }); + + // Step 5: call owner_chg in check-only mode + const res = request.post(`${data_base_url}/owner_chg?client=fakeUser`, { + body: { + ids, + coll_id: newCollId, + check: true, + }, + headers: { + "x-correlation-id": "test-corr-owner-chg-001", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? 
JSON.parse(res.body) : res.body; + + // Step 6: assertions + expect(body).to.be.an("object"); + }); + it("should initialize a delete task for data records", () => { + // Step 1: create data records + const createRes = request.post(`${data_base_url}/create/batch?client=fakeUser`, { + body: [{ title: "Delete Test Record 1" }, { title: "Delete Test Record 2" }], + json: true, + }); + + expect(createRes.statusCode).to.equal(200); + + const createBody = + typeof createRes.body === "string" ? JSON.parse(createRes.body) : createRes.body; + + const ids = createBody.results.map((r) => r.id); + expect(ids).to.have.lengthOf(2); + + // Step 2: sanity check — records exist + ids.forEach((id) => { + const doc = db.d.document(id, true); // safe: returns null if not found + expect(doc).to.be.an("object"); + expect(doc).to.have.property("_id", id); + }); + + // Step 3: call delete route + const res = request.post(`${data_base_url}/delete?client=fakeUser`, { + body: { + ids, + }, + headers: { + "x-correlation-id": "test-corr-delete-001", + }, + json: true, + }); + + expect(res.statusCode).to.equal(200); + + const body = typeof res.body === "string" ? 
JSON.parse(res.body) : res.body; + + // Step 4: assertions — task created + expect(body).to.be.an("object"); + expect(body).to.have.property("task"); + expect(body.task).to.have.property("_id"); + expect(body.task).to.have.property("status"); + }); +}); From c86e353615f08ecb220fbbdd9f41c5948ec356f8 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Thu, 5 Feb 2026 22:49:01 -0500 Subject: [PATCH 54/65] [1835-DAPS] - refactor: proto proto3 with envelope part 2 (#1841) --- common/proto3/common/CMakeLists.txt | 9 +++++++++ common/proto3/common/enums/error_code.proto | 18 +++++++++--------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/common/proto3/common/CMakeLists.txt b/common/proto3/common/CMakeLists.txt index 367c0801c..7c54ebb52 100644 --- a/common/proto3/common/CMakeLists.txt +++ b/common/proto3/common/CMakeLists.txt @@ -1,5 +1,14 @@ cmake_minimum_required(VERSION 3.17.0) +# Collect proto files from the new 1-1-1 structure +file(GLOB_RECURSE ProtoFiles + "${CMAKE_CURRENT_SOURCE_DIR}/enums/*.proto" + "${CMAKE_CURRENT_SOURCE_DIR}/messages/*.proto" + "${CMAKE_CURRENT_SOURCE_DIR}/anon/*.proto" + "${CMAKE_CURRENT_SOURCE_DIR}/auth/*.proto" + "${CMAKE_CURRENT_SOURCE_DIR}/envelope.proto" +) + # Generate C++ from protos protobuf_generate( LANGUAGE cpp diff --git a/common/proto3/common/enums/error_code.proto b/common/proto3/common/enums/error_code.proto index 2d643629f..3abad9577 100644 --- a/common/proto3/common/enums/error_code.proto +++ b/common/proto3/common/enums/error_code.proto @@ -5,13 +5,13 @@ package SDMS; option cc_enable_arenas = true; enum ErrorCode { - ERROR_CODE_UNSPECIFIED = 0; - ERROR_CODE_BAD_REQUEST = 1; - ERROR_CODE_INTERNAL_ERROR = 2; - ERROR_CODE_CLIENT_ERROR = 3; - ERROR_CODE_SERVICE_ERROR = 4; - ERROR_CODE_AUTHN_REQUIRED = 5; - ERROR_CODE_AUTHN_ERROR = 6; - ERROR_CODE_DEST_PATH_ERROR = 7; - ERROR_CODE_DEST_FILE_ERROR = 8; + UNSPECIFIED = 0; + BAD_REQUEST = 1; + INTERNAL_ERROR = 2; + CLIENT_ERROR = 3; + SERVICE_ERROR = 4; + 
AUTHN_REQUIRED = 5; + AUTHN_ERROR = 6; + DEST_PATH_ERROR = 7; + DEST_FILE_ERROR = 8; } From c03924a194cece7fe161d89f1ca9cab22256bec5 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 11 Feb 2026 16:38:26 -0500 Subject: [PATCH 55/65] [DAPS-1837] - refactor common library to use proto3 (#1842) * [DAPS-1837] - refactor: core server to support proto3 (#1844) * [DAPS-1845] - refactor repo server to use proto3 (#1846) * [DAPS-1847] - refactor: python package to be compatible with proto3 envelope (#1849) * [DAPS-1848] - refactor: update authz files to use proto3. (#1851) * [DAPS-1850] - refactor web server proto3 (#1852) --- .github/workflows/javascript-lint.yml | 2 +- CMakeLists.txt | 15 +- common/CMakeLists.txt | 2 +- common/include/common/IMessage.hpp | 206 ++- common/include/common/IMessageMapper.hpp | 6 +- common/include/common/ProtoBufMap.hpp | 154 +- common/proto/common/CMakeLists.txt | 24 - common/proto/common/SDMS.proto | 448 ------ common/proto/common/SDMS_Anon.proto | 106 -- common/proto/common/SDMS_Auth.proto | 1417 ----------------- common/proto/common/Version.proto.in | 35 - common/proto3/common/anon/nack_reply.proto | 2 +- .../common/auth/acl_shared_list_request.proto | 4 +- .../common/auth/acl_update_request.proto | 2 +- .../common/auth/check_perms_request.proto | 2 +- .../common/auth/coll_create_request.proto | 8 +- .../auth/coll_get_parents_request.proto | 2 +- .../auth/coll_list_published_request.proto | 6 +- .../common/auth/coll_read_request.proto | 4 +- .../common/auth/coll_update_request.proto | 10 +- .../proto3/common/auth/data_get_request.proto | 12 +- .../proto3/common/auth/data_put_request.proto | 12 +- .../auth/generate_credentials_request.proto | 4 +- .../common/auth/get_perms_request.proto | 2 +- .../common/auth/group_update_request.proto | 4 +- .../common/auth/note_update_request.proto | 6 +- .../common/auth/project_create_request.proto | 2 +- .../auth/project_get_role_request.proto | 2 +- .../common/auth/project_list_request.proto | 
16 +- .../common/auth/project_update_request.proto | 4 +- .../common/auth/query_exec_request.proto | 4 +- .../common/auth/query_list_request.proto | 4 +- .../common/auth/query_update_request.proto | 2 +- .../auth/record_alloc_change_request.proto | 4 +- .../common/auth/record_create_request.proto | 22 +- .../auth/record_list_by_alloc_request.proto | 4 +- .../auth/record_owner_change_request.proto | 4 +- .../common/auth/record_update_request.proto | 22 +- .../repo_allocation_set_default_request.proto | 2 +- .../auth/repo_allocation_stats_request.proto | 2 +- .../common/auth/repo_create_request.proto | 14 +- .../common/auth/repo_list_request.proto | 4 +- ...epo_list_subject_allocations_request.proto | 4 +- .../common/auth/repo_update_request.proto | 18 +- .../auth/repo_view_allocation_request.proto | 2 +- .../common/auth/schema_revise_request.proto | 8 +- .../common/auth/schema_search_request.proto | 14 +- .../common/auth/schema_update_request.proto | 10 +- .../common/auth/schema_view_request.proto | 2 +- .../proto3/common/auth/search_request.proto | 28 +- .../auth/tag_list_by_count_request.proto | 4 +- .../common/auth/tag_search_request.proto | 4 +- .../common/auth/task_list_request.proto | 10 +- .../auth/topic_list_topics_request.proto | 6 +- .../common/auth/user_create_request.proto | 4 +- .../auth/user_find_by_name_uid_request.proto | 4 +- .../auth/user_get_access_token_request.proto | 4 +- .../common/auth/user_list_all_request.proto | 4 +- .../auth/user_list_collab_request.proto | 4 +- .../common/auth/user_update_request.proto | 6 +- .../common/auth/user_view_request.proto | 2 +- .../common/enums/access_token_type.proto | 12 +- common/proto3/common/enums/encryption.proto | 6 +- common/proto3/common/enums/search_mode.proto | 4 +- common/proto3/common/enums/sort_option.proto | 12 +- common/proto3/common/enums/task_command.proto | 12 +- common/proto3/common/enums/task_status.proto | 10 +- common/proto3/common/enums/task_type.proto | 20 +- 
.../proto3/common/messages/group_data.proto | 4 +- common/source/Frame.cpp | 32 +- common/source/Frame.hpp | 125 +- common/source/ProtoBufFactory.cpp | 37 +- common/source/ProtoBufFactory.hpp | 7 +- common/source/ProtoBufMap.cpp | 207 ++- common/source/Util.cpp | 1 - .../communicators/ZeroMQCommunicator.cpp | 50 +- .../communicators/ZeroMQCommunicator.hpp | 2 +- .../ZeroMQCommunicatorSecure.cpp | 1 - common/source/messages/GoogleProtoMessage.cpp | 31 +- common/source/servers/Proxy.cpp | 4 - .../tcp_secure/test_tcp_secure_client.cpp | 31 +- .../tcp_secure/test_tcp_secure_server.cpp | 28 +- common/tests/unit/CMakeLists.txt | 1 - common/tests/unit/test_Buffer.cpp | 12 +- .../tests/unit/test_CommunicatorFactory.cpp | 35 +- common/tests/unit/test_Frame.cpp | 51 +- common/tests/unit/test_MessageFactory.cpp | 4 +- common/tests/unit/test_ProtoBufFactory.cpp | 59 - common/tests/unit/test_ProtoBufMap.cpp | 14 +- common/tests/unit/test_Proxy.cpp | 31 +- common/tests/unit/test_ProxyBasicZMQ.cpp | 51 +- core/server/ClientWorker.cpp | 310 ++-- core/server/ClientWorker.hpp | 2 +- core/server/Config.hpp | 2 +- core/server/DatabaseAPI.cpp | 427 ++--- core/server/DatabaseAPI.hpp | 438 +++-- core/server/GlobusAPI.cpp | 53 +- core/server/GlobusAPI.hpp | 1 - core/server/TaskMgr.cpp | 2 +- core/server/TaskMgr.hpp | 2 - core/server/TaskWorker.cpp | 32 +- core/server/Version.hpp.in | 16 + core/server/main.cpp | 17 +- core/server/tests/unit/test_DatabaseAPI.cpp | 10 + docker/Dockerfile.runtime | 1 + python/datafed_pkg/CMakeLists.txt | 2 +- python/datafed_pkg/datafed/CLI.py | 18 +- python/datafed_pkg/datafed/CMakeLists.txt | 68 +- python/datafed_pkg/datafed/CommandLib.py | 102 +- python/datafed_pkg/datafed/Connection.py | 142 +- python/datafed_pkg/datafed/MessageLib.py | 29 +- python/datafed_pkg/datafed/VERSION.py.in | 8 + .../datafed_pkg/scripts/fix_proto_imports.sh | 111 ++ python/datafed_pkg/setup.py | 9 + python/datafed_pkg/test/security.py | 4 +- python/pyproto_add_msg_idx.py | 61 - 
.../globus5/authz/source/AuthzWorker.cpp | 30 +- .../globus5/authz/source/Version.hpp.in | 17 + .../authz/tests/unit/test_AuthzWorker.cpp | 6 +- repository/server/Config.hpp | 4 +- repository/server/RepoServer.cpp | 25 +- repository/server/RequestWorker.cpp | 73 +- repository/server/Version.hpp.in | 16 + repository/server/main.cpp | 15 +- web/datafed-ws.js | 261 ++- web/docker/Dockerfile | 5 +- web/static/api.js | 52 - web/version.js.in | 7 +- 128 files changed, 2157 insertions(+), 3860 deletions(-) delete mode 100644 common/proto/common/CMakeLists.txt delete mode 100644 common/proto/common/SDMS.proto delete mode 100644 common/proto/common/SDMS_Anon.proto delete mode 100644 common/proto/common/SDMS_Auth.proto delete mode 100644 common/proto/common/Version.proto.in delete mode 100644 common/tests/unit/test_ProtoBufFactory.cpp create mode 100755 python/datafed_pkg/scripts/fix_proto_imports.sh delete mode 100755 python/pyproto_add_msg_idx.py diff --git a/.github/workflows/javascript-lint.yml b/.github/workflows/javascript-lint.yml index 1ef359c9a..e516c34a1 100644 --- a/.github/workflows/javascript-lint.yml +++ b/.github/workflows/javascript-lint.yml @@ -19,7 +19,7 @@ jobs: - name: Install ESLint run: | npm init -y - npm install eslint@latest @babel/eslint-parser@latest eslint-define-config globals eslint-plugin-jsdoc --save + npm install eslint@^9 @babel/eslint-parser@latest eslint-define-config globals eslint-plugin-jsdoc --save npx eslint "**/*.js" # Step 4: Report status diff --git a/CMakeLists.txt b/CMakeLists.txt index ac472ce8e..cb3a1fb3d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -117,20 +117,19 @@ endif() if ( BUILD_REPO_SERVER OR BUILD_CORE_SERVER OR BUILD_AUTHZ OR BUILD_COMMON OR BUILD_PYTHON_CLIENT OR BUILD_WEB_SERVER) - configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/common/proto/common/Version.proto.in" - "${CMAKE_CURRENT_SOURCE_DIR}/common/proto/common/Version.proto" - @ONLY) + # Create file glob here because need to be made visible here as well 
- file( GLOB ProtoFiles "${PROJECT_SOURCE_DIR}/common/proto/common/*.proto" ) + file(GLOB_RECURSE ProtoFiles "${PROJECT_SOURCE_DIR}/common/proto3/common/*.proto") include(./cmake/Protobuf.cmake) endif() if( BUILD_WEB_SERVER ) include(./cmake/Web.cmake) - file(COPY ${ProtoFiles} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/web/") + + file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/common/proto3/common/" + DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/web/proto3/") if( ENABLE_UNIT_TESTS ) add_test(NAME unit_tests_web COMMAND "${DEPENDENCY_INSTALL_PATH}/nvm/versions/node/${LOCAL_NODE_VERSION}/bin/npm" "test") @@ -200,7 +199,7 @@ endif() if( BUILD_PYTHON_CLIENT ) # make target = pydatafed - file(COPY ${PROJECT_SOURCE_DIR}/external/DataFedDependencies/python/datafed_pkg/requirements.txt DESTINATION ${PROJECT_SOURCE_DIR}/python/datafed_pkg/requirements.txt) + file(COPY ${PROJECT_SOURCE_DIR}/external/DataFedDependencies/python/datafed_pkg/requirements.txt DESTINATION ${PROJECT_SOURCE_DIR}/python/datafed_pkg) add_subdirectory( python EXCLUDE_FROM_ALL ) endif() @@ -228,9 +227,9 @@ if( INSTALL_CORE_SERVER ) endif() if( INSTALL_WEB_SERVER ) - install( FILES ${ProtoFiles} DESTINATION ${DATAFED_INSTALL_PATH}/web ) install( DIRECTORY ${PROJECT_SOURCE_DIR}/web/static DESTINATION ${DATAFED_INSTALL_PATH}/web ) install( DIRECTORY ${PROJECT_SOURCE_DIR}/web/views DESTINATION ${DATAFED_INSTALL_PATH}/web ) + install( DIRECTORY ${PROJECT_SOURCE_DIR}/web/proto3 DESTINATION ${DATAFED_INSTALL_PATH}/web ) install( FILES ${PROJECT_SOURCE_DIR}/web/version.js DESTINATION ${DATAFED_INSTALL_PATH}/web ) endif() diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index ab7244a97..3c00dd348 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required (VERSION 3.17.0) # The below command will by default generate the proto files in this folder # we want to place them in the binary folder in /proto/common -add_subdirectory(proto/common) +add_subdirectory(proto3/common) if( 
BUILD_COMMON ) configure_file( diff --git a/common/include/common/IMessage.hpp b/common/include/common/IMessage.hpp index 7d455feca..71aa5bbc7 100644 --- a/common/include/common/IMessage.hpp +++ b/common/include/common/IMessage.hpp @@ -17,16 +17,45 @@ class Message; namespace SDMS { -enum class MessageType { GOOGLE_PROTOCOL_BUFFER, STRING }; +/** + * @enum MessageType + * @brief Discriminator for the payload type carried by an IMessage. + */ +enum class MessageType { + GOOGLE_PROTOCOL_BUFFER, ///< Payload is a google::protobuf::Message. + STRING ///< Payload is a plain std::string. +}; /** - * The message is on its way to a server this it is a REQUEST - * The message is on its way from a server then it is a RESPONSE - **/ + * @enum MessageState + * @brief Indicates the directional state of a message relative to a server. + * + * - @c REQUEST — the message is traveling *toward* a server. + * - @c RESPONSE — the message is traveling *from* a server. + */ enum class MessageState { REQUEST, RESPONSE }; -enum class MessageAttribute { ID, KEY, STATE, CORRELATION_ID }; +/** + * @enum MessageAttribute + * @brief Named keys for the core metadata attributes stored on a message. + */ +enum class MessageAttribute { + ID, ///< Unique message identifier. + KEY, ///< Routing or lookup key. + STATE, ///< Current MessageState (REQUEST or RESPONSE). + CORRELATION_ID ///< Identifier used to correlate requests with responses. +}; +/** + * @brief Convert a MessageAttribute to its human-readable string form. + * + * @param[in] attribute The attribute to stringify. + * @return A string representation, or @c "unsupported_toString_print" if + * the attribute does not have a defined conversion. + * + * @note Only ID and KEY are currently supported; all other values fall + * through to the unsupported case. 
+ */ inline const std::string toString(const MessageAttribute attribute) { if (attribute == MessageAttribute::ID) { return std::string("ID"); @@ -37,67 +66,186 @@ inline const std::string toString(const MessageAttribute attribute) { } } +/// @brief Well-known string constants used throughout the messaging layer. namespace constants { namespace message { namespace google { -/// Supported dynamic arguments +/// @brief Key for the serialized frame size of a protobuf message. const std::string FRAME_SIZE = "frame_size"; -const std::string PROTO_ID = "proto_id"; -const std::string MSG_ID = "msg_id"; +/// @brief Key for the integral message-type identifier (e.g. envelope field number). const std::string MSG_TYPE = "msg_type"; +/// @brief Key for the security/session context attached to a message. const std::string CONTEXT = "context"; } // namespace google } // namespace message } // namespace constants +/** + * @class IMessage + * @brief Abstract interface for all messages exchanged through the SDMS + * messaging infrastructure. + * + * IMessage defines a uniform contract for: + * - **Payload management** — carrying either a protobuf Message or a raw + * string, with ownership semantics enforced by the implementation. + * - **Routing** — an ordered list of route identifiers that determines + * how a message is forwarded through the system. + * - **Metadata** — core attributes (ID, KEY, STATE, CORRELATION_ID) plus + * arbitrary named attributes with small-integer values. + * + * Implementations are expected to own the payload and manage its lifetime. + * Callers retrieve payload data via non-owning raw pointers or copies. + */ class IMessage { public: - + /// @brief Virtual destructor. virtual ~IMessage() {}; - virtual bool exists(MessageAttribute) const = 0; - virtual bool exists(const std::string &) const = 0; /** + * @brief Check whether a core metadata attribute has been set. + * + * @param[in] attribute The attribute to query. 
+ * @return @c true if the attribute is present on this message. + */ + virtual bool exists(MessageAttribute attribute) const = 0; + + /** + * @brief Check whether a named (dynamic) attribute has been set. + * + * @param[in] attribute_name The name of the dynamic attribute to query. + * @return @c true if the named attribute is present on this message. + */ + virtual bool exists(const std::string &attribute_name) const = 0; + + /*--------------------------------------------------------------------- * Setters - **/ + *-------------------------------------------------------------------*/ + + /** + * @brief Set the message payload. + * + * The implementation must take ownership of the supplied payload. For + * protobuf payloads the unique_ptr is moved in; for string payloads the + * string is copied or moved. After this call the IMessage instance is + * solely responsible for the lifetime of the payload data. + * + * @param[in] payload A variant holding either a protobuf message + * (via @c std::unique_ptr) or a plain string. + */ + virtual void setPayload( + std::variant, + std::string> + payload) = 0; /** - * Adding a payload should make a copy and store internally. It should - * Imply ownership of the payload and it's memory management. - **/ - virtual void - setPayload(std::variant, - std::string>) = 0; + * @brief Append a single route to the end of the routing list. + * + * @param[in] route The route identifier to append. + */ virtual void addRoute(const std::string &route) = 0; + /** + * @brief Replace the entire routing list. + * + * @param[in] routes The new ordered list of route identifiers. + */ virtual void setRoutes(const std::list &routes) = 0; - virtual void set(MessageAttribute, const std::string &) = 0; - virtual void set(MessageAttribute, MessageState) = 0; + /** + * @brief Set a core metadata attribute to a string value. + * + * Applicable to attributes such as @c ID, @c KEY, and + * @c CORRELATION_ID. 
+ * + * @param[in] attribute The attribute to set. + * @param[in] value The string value to assign. + */ + virtual void set(MessageAttribute attribute, const std::string &value) = 0; - virtual void set(std::string attribute_name, - std::variant) = 0; /** + * @brief Set a core metadata attribute to a MessageState value. + * + * Intended for the @c STATE attribute. + * + * @param[in] attribute The attribute to set (expected: @c STATE). + * @param[in] state The MessageState value to assign. + */ + virtual void set(MessageAttribute attribute, MessageState state) = 0; + + /** + * @brief Set a named dynamic attribute to a small unsigned integer. + * + * Dynamic attributes are identified by string keys (e.g. the constants + * in @c SDMS::constants::message::google). + * + * @param[in] attribute_name The name of the dynamic attribute. + * @param[in] value The value, stored as one of uint8/16/32. + */ + virtual void set(std::string attribute_name, + std::variant value) = 0; + + /*--------------------------------------------------------------------- * Getters - **/ + *-------------------------------------------------------------------*/ /** - * The correlation ID is assigned to a message when it is created and is - *extremely important for tracing a message in the logs. - **/ + * @brief Retrieve a core metadata attribute. + * + * The returned variant holds a @c std::string for most attributes, or a + * @c MessageState when querying @c STATE. + * + * @param[in] attribute The attribute to retrieve. + * @return A variant containing either the string value or the + * MessageState, depending on the attribute. + */ virtual std::variant - get(MessageAttribute) const = 0; + get(MessageAttribute attribute) const = 0; + + /** + * @brief Get a const reference to the ordered routing list. + * + * @return Const reference to the internal route list. + */ virtual const std::list &getRoutes() const = 0; + + /** + * @brief Get a mutable reference to the ordered routing list. 
+ * + * @return Mutable reference to the internal route list. + */ virtual std::list &getRoutes() = 0; + + /** + * @brief Return the payload type carried by this message. + * + * @return The MessageType discriminator. + */ virtual MessageType type() const noexcept = 0; + + /** + * @brief Retrieve a named dynamic attribute. + * + * @param[in] attribute_name The name of the dynamic attribute. + * @return The value stored as a @c uint8_t, @c uint16_t, or @c uint32_t + * variant. + */ virtual std::variant get(const std::string &attribute_name) const = 0; - /// Note not returning a unique_ptr but a raw pointer because the message - // should stil have ownership of the object. + /** + * @brief Retrieve a non-owning handle to the message payload. + * + * Ownership remains with the IMessage instance. For protobuf payloads + * a raw pointer is returned (not a @c unique_ptr) to make this + * explicit. For string payloads the string is returned by value (copy). + * + * @return A variant holding either a non-owning + * @c google::protobuf::Message* or a @c std::string. 
+ */ virtual std::variant getPayload() = 0; }; } // namespace SDMS + #endif // MESSAGE_HPP diff --git a/common/include/common/IMessageMapper.hpp b/common/include/common/IMessageMapper.hpp index 20e44766e..38ef18a79 100644 --- a/common/include/common/IMessageMapper.hpp +++ b/common/include/common/IMessageMapper.hpp @@ -22,10 +22,8 @@ class IMessageMapper { }; public: - virtual uint16_t getMessageType(uint8_t a_proto_id, - const std::string &a_message_name) = 0; - - virtual uint8_t getProtocolID(MessageProtocol) const = 0; + virtual uint16_t getMessageType(const std::string& message_name) const = 0; + virtual bool requiresAuth(const std::string& msg_type) const = 0; }; } // namespace SDMS diff --git a/common/include/common/ProtoBufMap.hpp b/common/include/common/ProtoBufMap.hpp index 08d92b739..e30f62b90 100644 --- a/common/include/common/ProtoBufMap.hpp +++ b/common/include/common/ProtoBufMap.hpp @@ -1,9 +1,21 @@ +/** + * @file ProtoBufMap.hpp + * @brief Provides message type mapping and envelope wrap/unwrap for the SDMS + * protobuf messaging layer. + * + * ProtoBufMap maintains bidirectional mappings between envelope field numbers + * (used as stable message type identifiers) and protobuf Descriptor objects. + * It also handles serialization boundary concerns via the Envelope message + * pattern, wrapping outgoing messages and unwrapping incoming ones. + */ + #ifndef PROTOBUFMAP_HPP #define PROTOBUFMAP_HPP #pragma once // Public common includes #include "IMessageMapper.hpp" +#include "common/envelope.pb.h" // Third party includes #include @@ -16,34 +28,154 @@ namespace SDMS { +/** + * @class ProtoBufMap + * @brief Bidirectional message type registry and envelope serialization layer. + * + * Maps between uint16_t message type identifiers (derived from envelope field + * numbers) and protobuf Descriptor/FileDescriptor objects. Implements the + * IMessageMapper interface and provides envelope wrap/unwrap operations for + * wire-boundary serialization. 
+ * + * Message type identifiers are organized by category: + * - 10–29: Anonymous (no-auth) messages + * - 200–249: Record messages + * - etc. + * + * @see IMessageMapper + * @see SDMS::Envelope + */ class ProtoBufMap : public IMessageMapper { public: + /** @brief Maps a file index to its protobuf FileDescriptor. */ typedef std::map FileDescriptorMap; + + /** @brief Maps a message type ID (envelope field number) to its Descriptor. */ typedef std::map DescriptorMap; + + /** @brief Reverse mapping from Descriptor back to message type ID. */ typedef std::map MsgTypeMap; private: - FileDescriptorMap m_file_descriptor_map; - DescriptorMap m_descriptor_map; - MsgTypeMap m_msg_type_map; - std::unordered_map m_protocol_ids; + FileDescriptorMap m_file_descriptor_map; ///< Registered file descriptors. + DescriptorMap m_descriptor_map; ///< Type ID → Descriptor lookup. + MsgTypeMap m_msg_type_map; ///< Descriptor → type ID lookup. public: + /** + * @brief Constructs the map and populates all mappings from the Envelope + * descriptor's oneof fields. + * + * Iterates the Envelope message descriptor via reflection to register every + * message field, using each field's number as the stable type identifier. + */ ProtoBufMap(); + /** + * @brief Retrieves the Descriptor for a given message type ID. + * + * @param message_type Envelope field number identifying the message type. + * @return Pointer to the Descriptor, or nullptr if not found. + */ const ::google::protobuf::Descriptor * getDescriptorType(uint16_t message_type) const; - bool exists(uint16_t message_type) const { - return m_descriptor_map.count(message_type) > 0; - } - uint16_t getMessageType(::google::protobuf::Message &); + + /** + * @brief Checks whether a message type ID is registered. + * + * @param message_type Envelope field number to look up. + * @return True if the type is registered, false otherwise. 
+ */ + bool exists(uint16_t message_type) const; + + /** + * @brief Resolves the message type ID for a concrete protobuf message. + * + * @param msg A protobuf message instance. + * @return The envelope field number corresponding to the message's type. + * @throws std::runtime_error If the message type is not registered. + */ + uint16_t getMessageType(const ::google::protobuf::Message &msg) const; + + /** + * @brief Returns a human-readable name for a message type ID. + * + * @param MessageType Envelope field number identifying the message type. + * @return The full protobuf type name, or an error string if not found. + */ std::string toString(uint16_t MessageType) const; - virtual uint16_t getMessageType(uint8_t a_proto_id, - const std::string &a_message_name) final; - virtual uint8_t getProtocolID(MessageProtocol) const final; + + /** + * @brief Resolves a message type ID from a protobuf message type name string. + * + * @param message_name Fully-qualified or short protobuf message name. + * @return The corresponding envelope field number. + * @throws std::runtime_error If the name does not match any registered type. + */ + virtual uint16_t + getMessageType(const std::string &message_name) const final; + + /** + * @brief Wraps an inner message into an Envelope for wire transmission. + * + * Resolves the inner message's type ID via getMessageType(), then uses + * reflection to set the corresponding oneof field in a new Envelope by + * copying from the inner message. + * + * @param inner The message to wrap. + * @return A populated Envelope ready for serialization. + * @throws EC_INVALID_PARAM If the resolved field number does not exist + * in the Envelope descriptor. + */ + std::unique_ptr + wrapInEnvelope(const ::google::protobuf::Message &inner) const; + + /** + * @brief Extracts the inner message from a received Envelope. 
+ * + * Uses reflection to determine which oneof field is set, then calls + * ReleaseMessage() to transfer ownership of the inner message out of + * the envelope. After this call, the released field in @p envelope is + * cleared. + * + * @param envelope The received Envelope to unwrap. Modified in place — + * the released field is no longer owned by the envelope. + * @return The extracted inner message. Caller takes ownership. + * @throws EC_INVALID_PARAM If the envelope's message type cannot be + * resolved to a valid field. + */ + std::unique_ptr<::google::protobuf::Message> + unwrapFromEnvelope(SDMS::Envelope &envelope) const; + + /** + * @brief Determines whether a message type requires authentication. + * + * Messages in the anonymous set (e.g., version handshake, authentication + * requests) do not require a prior authenticated session. All other + * message types do. + * + * @param msg_type Short message type name (e.g., "VersionRequest"). + * @return True if the message requires an authenticated session, false if + * it is in the anonymous (no-auth) set. 
+ */ + virtual bool requiresAuth(const std::string &msg_type) const final { + static const std::unordered_set anon_types = { + "AckReply", + "NackReply", + "VersionRequest", + "VersionReply", + "GetAuthStatusRequest", + "AuthenticateByPasswordRequest", + "AuthenticateByTokenRequest", + "AuthStatusReply", + "DailyMessageRequest", + "DailyMessageReply"}; + return anon_types.count(msg_type) == 0; + } }; + } // namespace SDMS #endif // PROTOBUFMAP_HPP diff --git a/common/proto/common/CMakeLists.txt b/common/proto/common/CMakeLists.txt deleted file mode 100644 index 47d00e102..000000000 --- a/common/proto/common/CMakeLists.txt +++ /dev/null @@ -1,24 +0,0 @@ -cmake_minimum_required (VERSION 3.17.0) - -# Create the .cpp and .hpp files -protobuf_generate( - LANGUAGE cpp - PROTOS ${ProtoFiles} - IMPORT_DIRS "${DataFed_SOURCE_DIR}/common/proto/common" - OUT_VAR protobuf-generated-files -) - -add_custom_target(protobuf-gen-target DEPENDS ${protobuf-generated-files}) - -# make sure that datafed-protobuf is dependent on the cpp files when it compiles -if(BUILD_SHARED_LIBS) - add_library( datafed-protobuf SHARED ${protobuf-generated-files} ) -else() - add_library( datafed-protobuf STATIC ${protobuf-generated-files} ) -endif() -# The following command makes sure that the protobuf files are generated -# before attempting to compile with them. 
-add_dependencies(datafed-protobuf protobuf-gen-target) -set_target_properties(datafed-protobuf PROPERTIES POSITION_INDEPENDENT_CODE ON SOVERSION ${DATAFED_COMMON_PROTOCOL_API_MAJOR} VERSION ${DATAFED_COMMON_PROTOCOL_API_MAJOR}.${DATAFED_COMMON_PROTOCOL_API_MINOR}.${DATAFED_COMMON_PROTOCOL_API_PATCH} ) -target_link_libraries( datafed-protobuf protobuf::libprotobuf protobuf::libprotoc protobuf::libprotobuf-lite) -target_include_directories( datafed-protobuf INTERFACE ${PROJECT_BINARY_DIR}/common/proto) diff --git a/common/proto/common/SDMS.proto b/common/proto/common/SDMS.proto deleted file mode 100644 index 14c2c984a..000000000 --- a/common/proto/common/SDMS.proto +++ /dev/null @@ -1,448 +0,0 @@ -// ALERT: If messages in this proto file are ADDED or REMOVED, or if the -// definitions of existing message fields are CHANGED, then the VER_PROTO_MAJOR -// enum value in Version.proto must be incremented and backward compatibility -// will be lost. If fields are APPENDED to the end of existing message -// definitions, then VER_PROTO_MINOR must be incremented and backward -// compatibility will be maintained. Unused messages and parameters should be -// marked as deprecated, and these should be removed at the next major version -// change. - -// ALERT: Any LARGE INTEGER fields in messages are currently defined as DOUBLE -// instead of INT64 - This is because Javascript does not support 64 bit integers -// and uses doubles for both integer and floats. As long as an integer does not -// exceed 53 bits, this approach works. - -syntax = "proto2"; - -package SDMS; - - -// ============================ Enums - -// Used to return error categories. Likely to be deprecated. -enum ErrorCode -{ - ID_BAD_REQUEST = 1; - ID_INTERNAL_ERROR = 2; - ID_CLIENT_ERROR = 3; - ID_SERVICE_ERROR = 4; - ID_AUTHN_REQUIRED = 5; - ID_AUTHN_ERROR = 6; - ID_DEST_PATH_ERROR = 7; - ID_DEST_FILE_ERROR = 8; -} - -// Used to return status of backend services. Likely to be deprecated. 
-enum ServiceStatus -{ - SS_NORMAL = 0; - SS_DEGRADED = 1; - SS_FAILED = 2; - SS_OFFLINE = 3; -} - -// Used to specify data or collection search mode in queries -enum SearchMode -{ - SM_DATA = 0; - SM_COLLECTION = 1; -} - -// Used to specify dependency relatioships. Likely to be deprecated and replaced with expandable system. -enum DependencyType -{ - DEP_IS_DERIVED_FROM = 0; - DEP_IS_COMPONENT_OF = 1; - DEP_IS_NEW_VERSION_OF = 2; - DEP_TYPE_COUNT = 3; -} - -// Used to indicate dependency direction (IN = offspring, OUT = ancestor) -// Note that records only store outgoing dependencies to ancestors -enum DependencyDir -{ - DEP_IN = 0; - DEP_OUT = 1; -} - -// General record/colelction sorting options -enum SortOption -{ - SORT_ID = 0; - SORT_TITLE = 1; - SORT_OWNER = 2; - SORT_TIME_CREATE = 3; - SORT_TIME_UPDATE = 4; - SORT_RELEVANCE = 5; -} - -// Defines a users role in a project -// Note that project owners are treated as project admins -enum ProjectRole -{ - PROJ_NO_ROLE = 0; - PROJ_MEMBER = 1; - PROJ_MANAGER = 2; - PROJ_ADMIN = 3; -} - -// Used to specify type of access token contained within message -enum AccessTokenType { - GENERIC = 1; - GLOBUS = 2; - GLOBUS_AUTH = 3; - GLOBUS_TRANSFER = 4; - GLOBUS_DEFAULT = 5; - ACCESS_SENTINEL = 255; -} - -// Used to specify how the work is being done on the servers. 
-enum ExecutionMethod { - DIRECT = 1; - DEFERRED = 2; -} -// ============================ Data Structures - -// Allocation-specific statistics data -message AllocStatsData -{ - required string repo = 1; - required uint32 rec_count = 2; - required uint32 file_count = 3; - required uint64 data_size = 4; - repeated uint32 histogram = 5; -} - -message AllocData -{ - required string repo = 1; - required uint64 data_limit = 2; - required uint64 data_size = 3; - required uint32 rec_limit = 4; - required uint32 rec_count = 5; - optional string path = 6; - optional string id = 7; - optional bool is_def = 8; - optional AllocStatsData stats = 9; - optional string repo_type = 10; -} - -// For viewing dependencies -message DependencyData -{ - required string id = 1; - optional string alias = 2; - optional uint32 notes = 3; - required DependencyType type = 4; - required DependencyDir dir = 5; -} - -// For specifying dependencies -message DependencySpecData -{ - required string id = 1; - required DependencyType type = 2; -} - -message UserData -{ - required string uid = 1; - required string name_last = 2; - required string name_first = 3; - optional string email = 4; - optional string options = 5; - optional bool is_admin = 6; - optional bool is_repo_admin = 7; - repeated string ident = 8; - repeated AllocData alloc = 9; -} - -message ProjectData -{ - required string id = 1; - required string title = 2; - optional string desc = 3; - optional string owner = 4; - optional uint32 ct = 5; - optional uint32 ut = 6; - repeated string admin = 7; - repeated string member = 8; - repeated AllocData alloc = 9; -} - -message ListingData -{ - required string id = 1; - required string title = 2; - optional string alias = 3; - optional bool locked = 4; - optional string owner = 5; - optional string creator = 6; - optional double size = 7; - optional bool external = 8; - optional uint32 notes = 9; - // Needed for provenance graphing - optional int32 gen = 10; - optional bool deps_avail = 11; - 
repeated DependencyData dep = 12; - // Needed for catalog view - optional string owner_name = 13; - optional string desc = 14; -} - -/* -message CatItemInfoData -{ - required string id = 1; - required string title = 2; - required string owner_id = 3; - optional string owner_name = 4; - optional string alias = 5; - optional uint32 notes = 6; - optional string brief = 7; - optional double size = 8; -} -*/ - -message TagData -{ - required string name = 1; - required uint64 count = 2; -} - -message PathData -{ - repeated ListingData item = 1; -} - -message RecordData -{ - required string id = 1; - required string title = 2; - optional string alias = 3; - optional string desc = 4; - repeated string tags = 5; - optional string metadata = 6; - optional bool external = 7; - optional string source = 8; - optional string repo_id = 9; - optional double size = 10; - optional string ext = 11; - optional bool ext_auto = 12; - optional uint32 ct = 13; - optional uint32 ut = 14; - optional uint32 dt = 15; - optional string owner = 16; - optional string creator = 17; - optional bool locked = 18; - optional string parent_id = 19; - repeated DependencyData deps = 20; - optional uint32 notes = 21; - optional string md_err_msg = 22; - optional string sch_id = 23; - optional uint32 sch_ver = 24; -} - -// Fields required for a data repo to locate raw data -// TODO Path will be removed and replaced with owner ID -message RecordDataLocation -{ - required string id = 1; - required string path = 2; -} - -// A list of data locations associated with a specific repo -message RepoRecordDataLocations -{ - required string repo_id = 1; - repeated RecordDataLocation loc = 2; -} - -message RecordDataSize -{ - required string id = 1; - required double size = 2; -} - -message CollData -{ - required string id = 1; - required string title = 2; - optional string alias = 3; - optional string desc = 4; - repeated string tags = 5; - optional string topic = 6; - optional string owner = 7; - optional string 
creator = 8; - optional uint32 ct = 9; - optional uint32 ut = 10; - optional string parent_id = 11; - optional uint32 notes = 12; -} - -message GroupData -{ - required string uid = 1; - required string gid = 2; - optional string title = 3; - optional string desc = 4; - repeated string member = 5; -} - -message ACLRule -{ - required string id = 1; - optional uint32 grant = 2; - optional uint32 inhgrant = 3; -} - -message TopicData -{ - required string id = 1; - required string title = 2; - repeated TopicData path = 3; - optional string desc = 4; - optional string creator = 5; - optional bool admin = 6; - required uint32 coll_cnt = 7; - - //repeated TagMetrics tags = 7; - //repeated UserMetrics users = 8; - //repeated DateMetrics date = 9; -} - -message RepoData -{ - required string id = 1; - optional string title = 2; - optional string desc = 3; - optional uint64 capacity = 4; - optional string pub_key = 5; - optional string address = 6; - optional string endpoint = 7; - optional string path = 8; - optional string domain = 9; - optional string exp_path = 10; - repeated string admin = 11; - optional string type = 12; -} - - -// -------------------------------------------- ANNOTATION DEFINES - -// Defines annotation types -enum NoteType -{ - NOTE_QUESTION = 0; - NOTE_INFO = 1; - NOTE_WARN = 2; - NOTE_ERROR = 3; -} - -// Defines annotation state -enum NoteState -{ - NOTE_CLOSED = 0; - NOTE_OPEN = 1; - NOTE_ACTIVE = 2; -} - -// Defines annotation user comment data structure -message NoteComment -{ - required string user = 1; // User ID of comment author - required uint32 time = 2; // Timestamp comment was created - required string comment = 3; // Text content of comment - optional NoteType type = 4; // Note type change - optional NoteState state = 5; // Note state change -} - -// Defines annotation data structure -message NoteData -{ - required string id = 1; // Note ID - required NoteType type = 2; // Current note type - required NoteState state = 3; // Current note 
state - required string subject_id = 4; // Subject data record or collection ID - required string title = 5; // Title - repeated NoteComment comment = 6; // User comments in chronological order - required uint32 ct = 7; // Note create timestamp - required uint32 ut = 8; // Note update timestamp - optional string parent_id = 9; // Parent note ID (only if inherited) - optional bool has_child = 10; // Flag indicating presences of dependent note(s) -} - -// -------------------------------------------------- TASK DEFINES - -enum TaskType -{ - TT_DATA_GET = 0; - TT_DATA_PUT = 1; - TT_DATA_DEL = 2; - TT_REC_CHG_ALLOC = 3; - TT_REC_CHG_OWNER = 4; - TT_REC_DEL = 5; - TT_ALLOC_CREATE = 6; - TT_ALLOC_DEL = 7; - TT_USER_DEL = 8; - TT_PROJ_DEL = 9; -} - -enum TaskStatus -{ - TS_BLOCKED = 0; - TS_READY = 1; - TS_RUNNING = 2; - TS_SUCCEEDED = 3; - TS_FAILED = 4; -} - -enum TaskCommand -{ - TC_STOP = 0; - TC_RAW_DATA_TRANSFER = 1; - TC_RAW_DATA_DELETE = 2; - TC_RAW_DATA_UPDATE_SIZE = 3; - TC_ALLOC_CREATE = 4; - TC_ALLOC_DELETE = 5; -} - -enum Encryption -{ - ENCRYPT_NONE = 0; - ENCRYPT_AVAIL = 1; - ENCRYPT_FORCE = 2; -} - -message TaskData -{ - required string id = 1; - required TaskType type = 2; - required TaskStatus status = 3; - required string client = 4; - required uint32 step = 5; - required uint32 steps = 6; - required string msg = 7; - required uint32 ct = 8; - required uint32 ut = 9; - optional string source = 10; - optional string dest = 11; -} - -// -------------------------------------------------- TASK DEFINES - -message SchemaData -{ - required string id = 1; - required uint32 ver = 2; - optional bool depr = 3; - optional uint32 cnt = 4; - optional bool ref = 5; - optional string own_id = 6; - optional string own_nm = 7; - optional string desc = 8; - optional bool pub = 9; - optional string def = 10; - repeated SchemaData uses = 11; - repeated SchemaData used_by = 12; -} diff --git a/common/proto/common/SDMS_Anon.proto b/common/proto/common/SDMS_Anon.proto deleted 
file mode 100644 index ac916a653..000000000 --- a/common/proto/common/SDMS_Anon.proto +++ /dev/null @@ -1,106 +0,0 @@ -// ALERT: If messages in this proto file are ADDED or REMOVED, or if the -// definitions of existing message fields are CHANGED, then the VER_PROTO_MAJOR -// enum value in Version.proto must be incremented and backward compatibility -// will be lost. If fields are APPENDED to the end of existing message -// definitions, then VER_PROTO_MINOR must be incremented and backward -// compatibility will be maintained. Unused messages and parameters should be -// marked as deprecated, and these should be removed at the next major version -// change. - -syntax = "proto2"; - -import "SDMS.proto"; - -package SDMS.Anon; - -enum Protocol -{ - ID = 1; -} - - -// The AckReply is a simple positive acknowledgement for requests that do not -// need to reply with any data payload. -message AckReply -{ -} - -// NackReply is used to convey error information and can be returned from any -// request. -message NackReply -{ - required SDMS.ErrorCode err_code = 1; - optional string err_msg = 2; -} - -// Request to get system version information -// Reply: VersionReply on success, NackError on error -message VersionRequest -{ -} - -// Reply containing system version information. This information is compared -// to client/server local version information to determine if interoperability -// is possible. 
-message VersionReply -{ -// required uint32 major = 1; // System MAJOR version, no backward compatibility -// required uint32 mapi_major = 2; // Message API MAJOR version, no backward compatibility -// required uint32 mapi_minor = 3; // Message API MINOR version, backward compatible -// required uint32 core = 4; // Core server MINOR version, information only -// required uint32 web = 5; // Web server MINOR version, info/notification purposes -// required uint32 repo = 6; // Repo server MINOR version, info/notification purposes -// required uint32 client_py = 7; // Python client/api MINOR version, info/notification purposes - optional uint32 release_year = 1; - optional uint32 release_month = 2; - optional uint32 release_day = 3; - optional uint32 release_hour = 4; - optional uint32 release_minute = 5; - - optional uint32 api_major = 6; - optional uint32 api_minor = 7; - optional uint32 api_patch = 8; - - optional uint32 component_major = 9; - optional uint32 component_minor = 10; - optional uint32 component_patch = 11; -} - -// Request to get client authentication status -// Reply: AuthStatusReply on success, NackError on error -message GetAuthStatusRequest -{ -} - -// Request to authenticate by password -// Reply: AuthStatusReply on success, NackError on error -message AuthenticateByPasswordRequest -{ - required string uid = 1; // Local user ID - required string password = 2; // Password -} - -// Request to authenticate by Globus access token -// Reply: AuthStatusReply on success, NackError on error -message AuthenticateByTokenRequest -{ - required string token = 1; // Access token -} - -// Reply containing client authentication status -message AuthStatusReply -{ - required bool auth = 1; // Authenticated if true - optional string uid = 2; // DataFed user id -} - -// Get the system daily message -// Reply: DailyMessageReply -message DailyMessageRequest -{ -} - -message DailyMessageReply -{ - optional string message = 1; // Daily message, if set -} diff --git 
a/common/proto/common/SDMS_Auth.proto b/common/proto/common/SDMS_Auth.proto deleted file mode 100644 index 71fbf293c..000000000 --- a/common/proto/common/SDMS_Auth.proto +++ /dev/null @@ -1,1417 +0,0 @@ -// ALERT: If messages in this proto file are ADDED or REMOVED, or if the -// definitions of existing message fields are CHANGED, then the VER_PROTO_MAJOR -// enum value in Version.proto must be incremented and backward compatibility -// will be lost. If fields are APPENDED to the end of existing message -// definitions, then VER_PROTO_MINOR must be incremented and backward -// compatibility will be maintained. Unused messages and parameters should be -// marked as deprecated, and these should be removed at the next major version -// change. - -// NOTE: All messages in this protobuf file require an authorized client and will -// be rejected without processing if otherwise. - -syntax = "proto2"; - -import "SDMS.proto"; - -package SDMS.Auth; - -enum Protocol -{ - ID = 2; -} - -// ============================================================================ -// ----------- Setup/Config Messages ------------------------------------------ -// ============================================================================ - -// Request core server to generate/retrieve user security credentials -// Reply: GenerateCredentialsReply on success, NackReply on error -message GenerateCredentialsRequest -{ - optional string domain = 1; // Reserved for future use (Facility/org domain) - optional uint32 uid = 2; // Reserved for future use (POSIX uid) -} - -// Request core server to revoke (clear) user security credentials for the sending user -// This will prevent non-interactive login from ALL installed clients for the sending user -// Reply: AckReply on success, NackReply on error -message RevokeCredentialsRequest -{ -} - -// Reply containing user security credentials (ZeroMQ keys) that can be used for non-interactive login -// Note: these keys must be stored securely -message 
GenerateCredentialsReply -{ - required string pub_key = 1; // Public key - required string priv_key = 2; // Private key -} - - -// ============================================================================ -// ----------- Permissions Messages ------------------------------------------- -// ============================================================================ - -// Check if specified permission are granted on given resource -// Reply: CheckPermsReply on success, NackReply on error -message CheckPermsRequest -{ - required string id = 1; // Record, collection, or project ID - required uint32 perms = 2; // Requested access permissions -} - -// Reply indicating if all requested permissions are granted -message CheckPermsReply -{ - required bool granted = 1; // True if all permission granted, false otherwise -} - -// Request current user access permissions for given resource -// Reply: GetPermsReply on success, NackReply on error -message GetPermsRequest -{ - required string id = 1; // Record, collection, or project ID - optional uint32 perms = 2; // Optional bitmask for permissions to check, gets all if not given -} - -// Reply containing granted permissions for given user/resource -// If perms bitmask was specified, granted will only apply to requested permissions -message GetPermsReply -{ - required uint32 granted = 1; // Granted permissions -} - - -// ============================================================================ -// ----------- User Messages -------------------------------------------------- -// ============================================================================ - -// Request to vew user information. Details can only be requested for -// authenticate client or by system admins. 
-// Reply: UserDataReply on success, NackReply on error -message UserViewRequest -{ - required string uid = 1; // DataFed user ID - optional bool details = 2; // Include details -} - -// Reply containing user information -message UserDataReply -{ - repeated SDMS.UserData user = 1; // User data - optional uint32 offset = 2; // Offset of this result page - optional uint32 count = 3; // Count of this result page - optional uint32 total = 4; // Total number of results -} - - -// Set user access tokens (from Globus OAuth). This message must be called -// whenever a user logs in via Globus (web portal) in order to update the access -// and refresh tokens issued by Globus. -// Reply: AckReply on success, NackReply on error -message UserSetAccessTokenRequest -{ - required string access = 1; // Access token - required uint32 expires_in = 2; // Access expiration time in seconds - required string refresh = 3; // Refresh token (never expires) - optional AccessTokenType type = 4 [default = GLOBUS_DEFAULT]; - optional string other = 5; // Other information relevant to the set access token -} - -// Get user access token (from Globus OAuth). This token will allow client to -// access Globus APIs on behalf of the authenticated user. -// Reply: UserAccessTokenReply on success, NackReply on error -message UserGetAccessTokenRequest -{ - optional string collection_id = 1; - optional string collection_type = 2; // TODO: use enum -} - -// Reply containing the user Globus access token and expiration time (in -// seconds) for authenticated user. -message UserAccessTokenReply -{ - required string access = 1; // Globus access token - required uint32 expires_in = 2; // Access token expiration in seconds - optional bool needs_consent = 3; // Indicate requirement of consent flow -} - -// Request to create a new DataFed user account. -// Reply: AckReply on success, NackReply on error -// Note: Use of this message is restricted to system service. 
Any attempted use by non-admin -// users will be logged. -message UserCreateRequest -{ - required string uid = 1; // User's GlobusID UID (name portion only) - optional string password = 2; // User-specified DataFed CLI password - required string name = 3; // Full name as given by Globus - required string email = 4; // Email address - repeated string uuid = 5; // UUID for primary Globus account - optional string options = 6; // DataFed options (JSON string) - optional string secret = 7; // System secret -} - -// Request to find DataFed user by one or more Globus UUIDs -// Reply: UserDataReply on success, NackReply on failure -message UserFindByUUIDsRequest -{ - repeated string uuid = 1; // An identity UUID linked to user's Globus account -} - -// Request to find users matching partial name/UID string (ngram search) -// Reply: UserDataReply on success, NackReply on failure -message UserFindByNameUIDRequest -{ - required string name_uid = 1; // String with full/partial name or UID to search for - optional uint32 offset = 2; // Optional result offset - optional uint32 count = 3; // Optional result count -} - -// Request to update a user record -// Reply: UserDataReply on success, NackReply on failure -message UserUpdateRequest -{ - required string uid = 1; // DataFed UID of user record to update - optional string email = 2; // Optional new email address - optional string password = 3; // Optional new DataFed password - optional string options = 4; // Optional DataFed options (JSON string) -} - -// Request to list all registered users -// Reply: UserDataReply on success, NackReply on failure -message UserListAllRequest -{ - optional uint32 offset = 1; // Optional result offset - optional uint32 count = 2; // Optional result count -} - -// Request to list all users that are collaborators of auth user -// Collaborators are users on same project(s) and/or with ACLs set to/from auth user -// Reply: UserDataReply on success, NackReply on failure -message 
UserListCollabRequest -{ - optional uint32 offset = 1; // Optional result offset - optional uint32 count = 2; // Optional result count -} - -// Request to get the list of recent Globus endpoints used by auth user -// Reply: UserGetRecentEPReply on success, NackReply on failure -message UserGetRecentEPRequest -{ -} - -// Reply with list of recent Globus endpoints used by auth user -message UserGetRecentEPReply -{ - repeated string ep = 1; // Endpoint ID (legacy not UUID) -} - -// Request to set the list of recent Globus endpoints used by auth user -// This request will replace the stored endpoint list with the one provided -// Reply: AckReply on success, NackReply on error -message UserSetRecentEPRequest -{ - repeated string ep = 1; // Endpoint ID (legacy not UUID) -} - - -// ============================================================================ -// ----------- General Data/Collection Messages ------------------------------- -// ============================================================================ - -// Reply containing general list of items (data record, collections, users, -// projects, etc). Only common fields are returned. -message ListingReply -{ - repeated ListingData item = 1; // Items - optional uint32 offset = 2; // Offset of this result page - optional uint32 count = 3; // Count of this result page - optional uint32 total = 4; // Total number of results -} - - -// ============================================================================ -// ----------- Data Record Messages ------------------------------------------- -// ============================================================================ - -// Request to list all records associated with a given repository and user/subject. -// If the subject is provided sufficient privileges are required. 
-// Reply: ListingReply on success, NackReply on error -message RecordListByAllocRequest -{ - required string repo = 1; // Repository ID - optional string subject = 2; // Optional project or user ID - optional uint32 offset = 3; // Optional result offset - optional uint32 count = 4; // Optional result count -} - -// Request to view a data record. Requires READ_REC permissions. Metadata will be -// returned if client has READ_META permission. -// Reply: RecordDataReply on success, NackReply on error -message RecordViewRequest -{ - required string id = 1; // Record ID or alias - optional bool details = 2; // Reserved for future use -} - -// Reply containing details of one or more data records in response to original -// request. If original request resulted in side effects, impacted records will -// be listed in the update field. This is an optimization to reduce latency for -// graphical clients that may be displaying multiple/many records that need to -// be refreshed as a result of the original request. -message RecordDataReply -{ - repeated SDMS.RecordData data = 1; // Record details - repeated SDMS.ListingData update = 2; // Optional list of records impacted by side effects -} - -// Request to create a new data record. 
-// Reply: RecordDataReply on success, NackReply on error -message RecordCreateRequest -{ - optional string title = 1; // Record title (required) - optional string desc = 2; // Optional description (markdown supported) - repeated string tags = 3; // Optional list of tags - optional string alias = 4; // Optional alias - optional string metadata = 5; // Optional metadata (JSON string) - optional string parent_id = 6; // Optional parent collection ID or alias (default is root) - optional bool external = 7; // Optional flag for external (unmanaged) raw data - optional string source = 8; // Optional raw data source Globus path - optional string repo_id = 9; // Optional repository ID (default for user/project used if omitted) - optional string ext = 10; // Optional record extension - optional bool ext_auto = 11; // Optional flag for automatic extension (based on source file) - repeated DependencySpecData deps = 12; // Optional record dependencies - optional string sch_id = 13; // Optional metadata schema ID - optional bool sch_enforce = 14; // Optional flag to enforce schema validation (fails on non-compliance) -} - -// Request to create data records in batch. This message will be DEPRECATED. -// Reply: RecordDataReply on success, NackReply on error -message RecordCreateBatchRequest -{ - required string records = 1; // JSON array containing records following DB record create schema -} - -// Request to update an existing data record. 
-// Reply: RecordDataReply on success, NackReply on error -message RecordUpdateRequest -{ - required string id = 1; // Record ID or alias - optional string title = 2; // Optional title - optional string desc = 3; // Optional description (markdown supported) - repeated string tags = 4; // Optional list of tags to add - optional bool tags_clear = 5; // Optional flag to clear existing tags - optional string alias = 6; // Optional alias - optional string metadata = 7; // Optional metadata (JSON string) - optional bool mdset = 8; // Optional flag to set (replace) metadata if true, or merge if false - optional string sch_id = 9; // Optional metadata schema ID - optional bool sch_enforce = 10; // Optional flag to enforce schema validation (fails on non-compliance) - optional string source = 11; // Optional raw data source Globus path - optional string ext = 12; // Optional record extension - optional bool ext_auto = 13; // Optional flag for automatic extension (based on source file) - //optional uint64 size = 14; - //optional uint32 dt = 15; - repeated DependencySpecData dep_add = 16; // Optional new dependencies to add - repeated DependencySpecData dep_rem = 17; // Optional existing dependencies to remove -} - -// Request to update data records in batch. This message will be DEPRECATED. -// Reply: RecordDataReply on success, NackReply on error -message RecordUpdateBatchRequest -{ - required string records = 1; // JSON array containing records following DB record update schema -} - -// Request to export data records in batch. This message may be DEPRECATED. -// Reply: RecordExportReply on success, NackReply on error -message RecordExportRequest -{ - repeated string id = 1; -} - -// Reply containing JSON representation of data records. This message may be DEPRECATED. 
-message RecordExportReply -{ - repeated string record = 1; // JSON string containing data record -} - -// Not used currently -message RecordLockRequest -{ - repeated string id = 1; - required bool lock = 2; -} - -// Request to delete one or more data records. Deletions are handle by background tasks -// thus a TaskDataReply is returned containing the TaskID of the delete operation. -// Reply: TaskDataReply on success, NackReply on error -message RecordDeleteRequest -{ - repeated string id = 1; // ID/alias of record(s) to delete -} - -// Request to get the dependency graph for a given data record. Returns all ancestor -// and descendent records that are related by derivation linkage. Lateral provenance -// relationships of related records are not returned. -// Reply: ListingReply on success, NackReply on error -message RecordGetDependencyGraphRequest -{ - required string id = 1; // ID/alias of data record of interest -} - -// Request to change tha allocation of specified records. All records must be owned by -// the auth user or the specified project. Any records already on destination repo will -// be ignored. On success, a background task is created to perform the actual allocation -// change (requires moving raw data and updating records). -// Reply: RecordAllocChangeReply on success, NackReply on error -message RecordAllocChangeRequest -{ - repeated string id = 1; // ID/alias of data records and or collections - required string repo_id = 2; // Destination repository ID - optional string proj_id = 3; // Optional project ID if data belongs to a project - optional bool check = 4; // Optional flag to perform initial validation only if true -} - -// Reply containing information about record allocation change, including -// associated background task. 
-message RecordAllocChangeReply -{ - required uint32 act_cnt = 1; // Actual count of records to changed - required uint64 act_size = 2; // Actual data size (bytes) that will be moved - required uint32 tot_cnt = 3; // Total number of records in request (including those already on destination repo) - required uint64 data_limit = 4; // Dest repo data size limit (bytes) - required uint64 data_size = 5; // Dest repo current data size (bytes) - required uint32 rec_limit = 6; // Dest repo record count limit - required uint32 rec_count = 7; // Dest repo current record count - optional SDMS.TaskData task = 8; // Background task information -} - -// Request to change tha ownership of specified records. All records must be owned by -// the auth user or the specified project. Auth user must have CREATE permission on -// destination collection. On success, a background task is created to perform the -// actual ownership change (requires moving raw data and updating records). -// Reply: RecordOwnerChangeReply on success, NackReply on error -message RecordOwnerChangeRequest -{ - repeated string id = 1; // ID/alias of data records and or collections - required string coll_id = 2; // Destination collection ID/alias - optional string repo_id = 3; // Optional destination repo ID (default will be used if omitted) - optional string proj_id = 4; // DEPRECATED - optional bool check = 5; // Optional flag to perform initial validation only if true -} - -// Reply containing information about record ownership change, including -// associated background task. 
-message RecordOwnerChangeReply -{ - required uint32 act_cnt = 1; // Actual count of records to changed - required uint64 act_size = 2; // Actual data size (bytes) that will be moved - required uint32 tot_cnt = 3; // Total number of records in request (including those already owned by destination account) - repeated AllocData alloc = 4; // List of available allocations (only if check option set to true) - optional SDMS.TaskData task = 8; // Background task information -} - - -// ============================================================================ -// ----------- Raw Data Messages ---------------------------------------------- -// ============================================================================ - -// Request to download raw data for one or more records. Auth user must have READ_DATA -// permission on all requested records. If the orig_fname option is specified, a check -// will be performed to ensure there are no duplicate filenames, and the request will -// fail if there are. On success, a background task is created to perform the actual -// data transfer. -// Reply: DataGetReply on success, NackReply on error -message DataGetRequest -{ - repeated string id = 1; // ID/alias of data records and or collections - optional string path = 2; // Globus path to download to (not required if check option is set) - optional Encryption encrypt = 3; // Optional encryption mode (none, if available, required) - optional bool orig_fname = 4; // Optional flag to download to original filenames - optional bool check = 5; // Optional flag to perform initial validation only if true - optional string collection_id = 6; // Globus Collection ID - optional string collection_type = 7; // Globus Collection Type -} - -// Request to upload raw data to a data records. Auth user must have WRITE_DATA -// permission on the requested record. On success, a background task is created to -// perform the actual data transfer. 
-// Reply: DataPutReply on success, NackReply on error -message DataPutRequest -{ - required string id = 1; // ID/alias of data record - optional string path = 2; // Globus path to upload from - optional Encryption encrypt = 3; // Optional encryption mode (none, if available, required) - optional string ext = 4; // Optional extension override - optional bool check = 5; // Optional flag to perform initial validation only if true - optional string collection_id = 6; // Globus Collection ID - optional string collection_type = 7; // Globus Collection Type -} - -// Reply containing data download information, including associated background task. -message DataGetReply -{ - repeated ListingData item = 1; // Basic data for records to be downloaded - optional SDMS.TaskData task = 2; // Background task information -} - -// Reply containing data upload information, including associated background task. -message DataPutReply -{ - required RecordData item = 1; // Basic data for record to be uploaded - optional SDMS.TaskData task = 2; // Background task information -} - -// Not currently used (delete raw data only) -message DataDeleteRequest -{ - repeated string id = 1; -} - -// Not currently used (get domain-local path to raw data of a record for direct access) -message DataPathRequest -{ - required string id = 1; - required string domain = 2; -} - -// Not currently used (domain-local path to raw data of a record for direct access) -message DataPathReply -{ - required string path = 1; -} - - -// ============================================================================ -// ----------- Search Messages ------------------------------------------------ -// ============================================================================ - -// Request to search for data records or collection. 
-// Reply: ListingReply on success, NackReply on error -message SearchRequest -{ - required SDMS.SearchMode mode = 1; // Mode is data or collections - optional bool published = 2; // If true, searches public catalog - - // Data and Collections - optional string id = 3; // Partial ID or alias match (wildcard) - optional string text = 4; // Words or phrases (english root word match) - repeated string tags = 5; // Tags - repeated string cat_tags = 6; // Catalog tags used internally for catalog topics - optional uint32 from = 7; // Matches from updated date/time - optional uint32 to = 8; // Matches to updated date/time - optional string owner = 9; // User/project ID of owner - optional string creator = 10; // User ID of creator - repeated string coll = 11; // Collections to search - - // Data scope only - optional string sch_id = 12; // Metadata schema ID - optional string meta = 13; // Metadata expression - optional bool meta_err = 14; // Match records with metadata validation errors - - optional SDMS.SortOption sort = 15; // Sort option (title, ID, etc) - optional bool sort_rev = 16; // Reverse sort order - optional uint32 offset = 17; // Result offset - optional uint32 count = 18; // Result count -} - - -// ============================================================================ -// ----------- Collection Messages -------------------------------------------- -// ============================================================================ - -// Request to view details of a collection. Requires READ_REC permission. -// Reply: CollDataReply on success, NackReply on error -message CollViewRequest -{ - required string id = 1; // Collection ID/alias -} - -// Reply containing details of one or more collections. If original request -// resulted in side effects, impacted collections will be listed in the update field. 
-// This is an optimization to reduce latency for graphical clients that may be -// displaying multiple/many collections that need to be refreshed as a result -// of the original request. -message CollDataReply -{ - repeated SDMS.CollData coll = 1; // Collection data - repeated SDMS.ListingData update = 2; // Optional list of records/collections impacted by side effects -} - -// Request to read (list) contents of collection (records, child collections). Requires -// LIST permission. -// Reply: ListingReply on success, NackError on error -message CollReadRequest -{ - required string id = 1; // ID/alias of collection to read - optional bool details = 3; // DEPRECATED - optional uint32 offset = 4; // Result offset - optional uint32 count = 5; // Result count -} - -// Request to create a new collection. Requires CREATE permission in parent collection. -// Reply: CollDataReply on success, NackError on error -message CollCreateRequest -{ - optional string title = 1; // Title of collection - optional string desc = 2; // Optional description (markdown supported) - optional string alias = 3; // Optional alias - optional string parent_id = 4; // Optional parent collection ID/alias (root is default) - optional string topic = 6; // Optional topic for public collection - repeated string tags = 7; // Optional tags -} - -// Request to update an existing collection. Requires WRITE_REC permission -// Reply: CollDataReply on success, NackError on error -message CollUpdateRequest -{ - required string id = 1; // ID / alias of collection to update - optional string title = 2; // Optional title - optional string desc = 3; // Optional description (markdown supported) - optional string alias = 4; // Optional alias - optional string topic = 6; // Optional topic for public collection - repeated string tags = 7; // Optional tags to add - optional bool tags_clear = 8; // Optional flag to clear existing tags if true -} - -// Request to delete a collection. Requires DELETE permission. 
All contained collections -// will be deleted, as well as any data records that are not linked to other collections. -// On success, a background task will be started to perform the actual delete operation. -// Reply: TaskDataReply on success, NackError on error -message CollDeleteRequest -{ - repeated string id = 1; // ID/alias of collection to delete -} - -// Request to write (add/remove) items to a collection. Requires LINK permission. -// This request does not unlink data records being added to the collection. Any -// items removed that are not linked elsewhere will be added to the root collection -// and returned in the update section of the reply. -// Reply: ListingReply on success, NackError on error -message CollWriteRequest -{ - required string id = 1; // ID/alias of collection to write to - repeated string add = 2; // ID/alias of records/collections to add - repeated string rem = 3; // ID/alias of records/collections to remove - optional bool rem_all = 4; // DEPRECATED -} - -// Request to move records and child collections from a source collection to a -// destination collection. Requires LINK permission and same owners for both source -// and destination collections. -// Reply: AckReply on success, NackError on error -message CollMoveRequest -{ - required string src_id = 1; // ID/alias of source collection - required string dst_id = 2; // ID/alias of destination collection - repeated string item = 3; // ID/alias of items in source collection to move -} - -// Request to get the parent collection path of a given data record or collection. -// Data records will have multiple paths if linked to multiple collections. 
-// Reply: CollPathReply on success, NackError on error -message CollGetParentsRequest -{ - required string id = 1; // ID/alias of data record or collection - optional bool inclusive = 2; // Optional flag to include specified id in path -} - -// Reply containing one or more collection paths -message CollPathReply -{ - repeated SDMS.PathData path = 1; // List of Collection paths -} - -// Request to get the page of an item in a collection. -// Reply: CollGetOffsetReply on success, NackError on error -// TODO: This request should simply return offset not page number -message CollGetOffsetRequest -{ - required string id = 1; // ID/alias of containing collection - required string item = 2; // ID/alias of child data record or collection - required uint32 page_sz = 3; // Page size to use for calculation -} - -// Reply containing page number of item in collection -message CollGetOffsetReply -{ - required string id = 1; // ID/alias of containing collection - required string item = 2; // ID/alias of child data record or collection - required uint32 offset = 3; // Page number of item -} - -// Request to list all published collections of user or project. -// Reply: ListingReply on success, NackError on error -message CollListPublishedRequest -{ - optional string subject = 1; // Optional user/project ID (auth user is default) - optional uint32 offset = 2; // Result offset - optional uint32 count = 3; // Result count -} - - -// ============================================================================ -// ----------- Group Messages ------------------------------------------------- -// ============================================================================ - -// Request to create a new group. -// Reply: GroupDataReply on success, NackError on error -message GroupCreateRequest -{ - required SDMS.GroupData group = 1; // Group data -} - -// Request to update an existing group. 
-// Reply: GroupDataReply on success, NackError on error -message GroupUpdateRequest -{ - required string uid = 1; // User/project ID of group - required string gid = 2; // Group ID - optional string title = 3; // Optional title - optional string desc = 4; // Optional description - repeated string add_uid = 5; // User IDs to add to group - repeated string rem_uid = 6; // User IDs to remove from group -} - -// Reply containing group details -message GroupDataReply -{ - repeated SDMS.GroupData group = 1; // Group details -} - -// Request to delete a group belonging to auth user or project. Deleting a project -// group requires manager permission for associated project. -// Reply: AckReply on success, NackError on error -// TODO: uid should be optional and changed to proj_id -message GroupDeleteRequest -{ - required string uid = 1; // User/project ID of group owner - required string gid = 2; // Group ID of group -} - -// Request to list all groups belonging to user or project. Listing project groups -// requires membership in associated project. -// Reply: GroupDataReply on success, NackError on error -// TODO: uid should be optional and changed to proj_id -message GroupListRequest -{ - required string uid = 1; // User/project ID of group owner -} - -// Request to view details of specified group. Viewing project group requires -// membership in associated project. -// Reply: GroupDataReply on success, NackError on error -// TODO: uid should be optional and changed to proj_id -message GroupViewRequest -{ - required string uid = 1; // User/project ID of group owner - required string gid = 2; // Group ID of group -} - - -// ============================================================================ -// ----------- Access Control List (ACL) Messages ----------------------------- -// ============================================================================ - -// Request to view all ACL rules on a given data record or collection. 
-// Reply: ACLDataReply on success, NackError on error -message ACLViewRequest -{ - required string id = 1; // ID/alias of data record or collection -} - -// Request to update (replace) ACLs on a data record or collection. ACLs are -// set using a JSON payload containing an array of rule objects containing -// the following fields: -// id : User or group ID (required) -// grant : bitmask for local permission grants -// inhgrant : bitmask for inherited permission grants (collections only) -// Reply: ACLDataReply on success, NackError on error -// TODO: Why is this not using the ACLRule struct for rules? -message ACLUpdateRequest -{ - required string id = 1; // ID/alias of data record or collection - optional string rules = 2; // New ACL rules as JSON -} - -// Request to list users/projects owning any ACLs set for client -// Reply: ListingReply on success, NackError on error -message ACLSharedListRequest -{ - optional bool inc_users = 2; // Include user ACLs - optional bool inc_projects = 3; // Include project ACLs -} - -// List data records and/or collections that are share with client by the specified -// owner (user or project). -// Reply: ListingReply on success, NackError on error -message ACLSharedListItemsRequest -{ - required string owner = 2; // User/project ID that is sharing data -} - -// Reply containing ACL information -message ACLDataReply -{ - repeated ACLRule rule = 1; // ACL rules -} - - -// ============================================================================ -// ----------- Project Messages ----------------------------------------------- -// ============================================================================ - -// Request to view project details. Requires client to be associated with the -// project, or be an admin. -// Reply: ProjectDataReply on success, NackError on error -message ProjectViewRequest -{ - required string id = 1; // Project ID -} - -// Reply containing detailed information for one or more projects. 
-message ProjectDataReply -{ - repeated ProjectData proj = 1; // Project data -} - -// Request to create a new project. Requires client to be a repository administrator. -// The creator becomes the owner of the project. -// Reply: ProjectDataReply on success, NackError on error -message ProjectCreateRequest -{ - required string id = 1; // Project OD - optional string title = 2; // Title - optional string desc = 3; // Description - repeated string admin = 4; // List of project admins (managers) - repeated string member = 5; // List of project members -} - -// Request to update an existing project. Requires client to be either the owner -// or an admin of the specified project. Only the project owner can alter the admin -// list. -// Reply: ProjectDataReply on success, NackError on error -message ProjectUpdateRequest -{ - required string id = 1; // Project ID - optional string title = 2; // New title - optional string desc = 3; // New description - optional bool admin_set = 4 [default = false]; // Clear existing admin list - repeated string admin = 5; // New admins (user IDs) - optional bool member_set = 6 [default = false]; // Clear existing member list - repeated string member = 7; // New members (user IDs) -} - -// Request to delete a project. Client must be the project owner or a -// system admin. On success a background task is started to perform the -// actual delete operation. -// Reply: TaskDataReply on success, NackError on error -message ProjectDeleteRequest -{ - repeated string id = 1; // Project ID to delete -} - -// Request to list all projects associated with client. 
-// Reply: ListingReply on success, NackError on error -message ProjectListRequest -{ - optional string subject = 1; // DEPRECATED - optional bool as_owner = 2; // Flag to include owned projects - optional bool as_admin = 3; // Flag to include managed projects - optional bool as_member = 4; // Flag to include member projects - optional SortOption sort = 5; // Sort option - optional bool sort_rev = 6; // Reverse sort order - optional uint32 offset = 7; // Result offset - optional uint32 count = 8; // Result count -} - -// Not currently used -message ProjectSearchRequest -{ - required string text_query = 1; - repeated string scope = 2; -} - -// Request to get the project role of client or specified user -// Reply: ProjectGetRoleReply on success, NackError on error -message ProjectGetRoleRequest -{ - required string id = 1; // Project ID - optional string subject = 2; // Optional user ID -} - -// Reply containing the project role of a user -message ProjectGetRoleReply -{ - required ProjectRole role = 1; -} - - -// ============================================================================ -// ----------- Repository Messages (Repo) ------------------------------------- -// ============================================================================ - -// NOTE: The following messages are use by Core and Repo servers only - -// Request to delete the raw data of onr or more records from repo -// Reply: AckReply on success, NackError on error -message RepoDataDeleteRequest -{ - repeated RecordDataLocation loc = 1; // Record ID and file path -} - -// Request to get the file size of one or more data records from repo -// Reply: RepoDataSizeReply on success, NackError on error -message RepoDataGetSizeRequest -{ - repeated RecordDataLocation loc = 1; // Record ID and file path -} - -// Reply to hold raw data size of one or more dat records -message RepoDataSizeReply -{ - repeated RecordDataSize size = 1; // Record size information -} - -// Request to create a data storage 
path on a repo -// Reply: AckReply on success, NackError on error -message RepoPathCreateRequest -{ - required string path = 1; // Path to raw data storage directory -} - -// Request to delete a data storage path on a repo -// Reply: AckReply on success, NackError on error -message RepoPathDeleteRequest -{ - required string path = 1; // Path to raw data storage directory -} - - -// ============================================================================ -// ----------- Repository Messages (Core) ------------------------------------- -// ============================================================================ - -// Request to list repositories -// Reply: RepoDataReply on success, NackError on error -message RepoListRequest -{ - optional bool details = 1; // Flag to include detailed repo information - optional bool all = 2; // Return all repos if true, otherwise only those administered by client -} - -// Request to view details of a repository -// Reply: RepoDataReply on success, NackError on error -message RepoViewRequest -{ - required string id = 1; // Repo ID -} - -// Request to create a new repository. Only system admins may send this request. -// Reply: RepoDataReply on success, NackError on error -message RepoCreateRequest -{ - required string id = 1; // ID of repo - required string title = 2; // Title - optional string desc = 3; // Description - optional string domain = 5; // RESERVED - optional string path = 6; // Path to storage directories - optional string exp_path = 7; // RESERVED - optional string address = 8; // Repo server address - optional string endpoint = 9; // Globus endpoint UUID or legacy name - optional string pub_key = 10; // Public encryption key - required uint64 capacity = 11; // Total data capacity - repeated string admin = 12; // Repo admin(s) - optional string type = 13; // Repository type (defaults to "globus") -} - -// Request to update an existing repository. Only system or repos admins may -// send this request. 
NOTE: Changing the storage path will not automatically -// move raw data files - this must be done manually. -// Reply: RepoDataReply on success, NackError on error -message RepoUpdateRequest -{ - required string id = 1; // ID of repo - optional string title = 2; // Title - optional string desc = 3; // Description - optional string domain = 5; // RESERVED - optional string path = 6; // Path to storage directories - optional string exp_path = 7; // RESERVED - optional string address = 8; // Repo server address - optional string endpoint = 9; // Globus endpoint UUID or legacy name - optional string pub_key = 10; // Public encryption key - optional uint64 capacity = 11; // Total data capacity - repeated string admin = 12; // Repo admin(s) - optional string type = 13; // Repository type -} - -// Request to delete a repository. Only system or repos admins may send this -// request. NOTE: All data records for all allocations must be moved/deleted -// first or this request will fail. -// Reply: AckReply on success, NackError on error -message RepoDeleteRequest -{ - required string id = 1; // Repo ID -} - -// Reply containing details of one or more repositories -message RepoDataReply -{ - repeated SDMS.RepoData repo = 1; // Repo details -} - -// Request to calculate the count and size of data stored in onr or more collections. -// Reply: RepoCalcSizeReply on success, NackError on error -// TODO: This request should be in Collection category -message RepoCalcSizeRequest -{ - required bool recurse = 1; // Recursive flag - repeated string item = 2; // Data / collection IDs -} - -// Reply containing data size information -message RepoCalcSizeReply -{ - repeated AllocStatsData stats = 1; // Data size info -} - -// Request to list all allocations on a repo administered by client. -// Reply: RepoAllocationsReply on success, NackError on error -message RepoListAllocationsRequest -{ - required string id = 1; // Repo ID -} - -// Request to list allocations of client or project. 
-// Reply: RepoAllocationsReply on success, NackError on error -message RepoListSubjectAllocationsRequest -{ - optional string subject = 1; // Optional project ID - optional bool stats = 2; // Flag to include allocation statistics -} - -// Request to list allocations belonging to owner of data record or collection. -// Client must have CREATE permission on collection. -// Reply: RepoAllocationsReply on success, NackError on error -message RepoListObjectAllocationsRequest -{ - required string id = 1; // Collection ID/alias -} - -// Request to view an allocation of a client or project on a given repo. If a -// project is specified, client must be associated with the project. -// Reply: RepoAllocationsReply on success, NackError on error -message RepoViewAllocationRequest -{ - required string repo = 1; // Repo ID - required string subject = 2; // Optional project ID -} - -// Reply containing allocation information -message RepoAllocationsReply -{ - repeated AllocData alloc = 1; // Allocation information -} - -// Request to calculate allocation statistics of a user or project on a given -// repo. Only repo admins may make this request. -// Reply: RepoAllocationStatsReply on success, NackError on error -message RepoAllocationStatsRequest -{ - required string repo = 1; // Repo ID - optional string subject = 2; // Optional user/project ID -} - -// Reply containing allocation statistics. -message RepoAllocationStatsReply -{ - required AllocStatsData alloc = 1; // Allocation stats -} - -// Request to create a new allocation on a repo for a user or project. -// Only repo admins may make this request. On success, a background task is -// started to create the allocation if it is a DataFed managed globus repo, -// for a metadata only repo it is created immediatly. 
-// Reply: TaskDataReply (to be superseded by RepoAllocationCreateResponse) -// on success, NackError on error -message RepoAllocationCreateRequest -{ - required string repo = 1; // Repo ID - required string subject = 2; // User/project ID - required uint64 data_limit = 3; // Data size limit (bytes) - required uint32 rec_limit = 4; // Data record limit(count) -} - -message RepoAllocationCreateResponse -{ - required SDMS.ExecutionMethod execution_method = 1; // The execution method that was used to create the allocation - optional SDMS.TaskData task = 2; // The task data if deferred execution - optional SDMS.AllocData result = 3; // The Allocation data if direct execution -} - -// Request to update an existing allocation. Only repo admins may make this -// request. -// Reply: AckReply on success, NackError on error -// TODO: Rename this message to RepoAllocationUpdateRequest -message RepoAllocationSetRequest -{ - required string repo = 1; // Repo ID - required string subject = 2; // User/project ID - required uint64 data_limit = 3; // New data size limit (bytes) - required uint32 rec_limit = 4; // New data record limit (count) -} - -// Request to set the default allocation for a user or project. If a -// project is specified, client must be associated with the project. -// Reply: AckReply on success, NackError on error -message RepoAllocationSetDefaultRequest -{ - required string repo = 1; // Repo ID - optional string subject = 2; // User/project ID -} - -// Request to delete an existing allocation. Only repo admins may make this -// request. On success, a background task is started to create the allocation. -// Reply: TaskDataReply on success, NackError on error -message RepoAllocationDeleteRequest -{ - required string repo = 1; // Repo ID - required string subject = 2; // User/project ID -} - -// Request to check authorization for GridFTP action on file in a repo. 
-// Reply: AckReply on success, NackError on error -message RepoAuthzRequest -{ - required string repo = 1; // Repo ID - required string client = 2; // Client ID - required string file = 3; // Path to file in repo - required string action = 4; // GridFTP action -} - -// ============================================================================ -// ----------- Saved Query Messages ------------------------------------------- -// ============================================================================ - -// NOTE: All saved query requests are restricted to authenticated client. - -// Request to create a new saved query. -// Reply: QueryDataReply on success, NackError on error -message QueryCreateRequest -{ - required string title = 1; // Query title - required SearchRequest query = 2; // Search parameters -} - -// Request to update an existing saved query. -// Reply: QueryDataReply on success, NackError on error -message QueryUpdateRequest -{ - required string id = 1; // Query ID - optional string title = 2; // Optional new title - optional SearchRequest query = 3; // Optional new search parameters -} - -// Request to -// Reply: AckReply on success, NackError on error -message QueryDeleteRequest -{ - repeated string id = 1; // Query ID -} - -// Request to list saved queries. -// Reply: ListingReply on success, NackError on error -message QueryListRequest -{ - optional uint32 offset = 1; // Result offset - optional uint32 count = 2; // Result count -} - -// Request to view a saved query information. -// Reply: QueryDataReply on success, NackError on error -message QueryViewRequest -{ - required string id = 1; // Query ID -} - -// Request to execute the specified saved query. 
-// Reply: ListingReply on success, NackError on error -message QueryExecRequest -{ - required string id = 1; // Query ID - optional uint32 offset = 2; // Results offset - optional uint32 count = 3; // Results count -} - -// Reply containing saved query data -message QueryDataReply -{ - required string id = 1; // Query ID - required string title = 2; // Title - required string owner = 4; // Owner user ID - required uint32 ct = 5; // Query create timestamp - required uint32 ut = 6; // Query update timestamp - required SearchRequest query = 7; // Search parameters -} - - -// ============================================================================ -// ----------- Annotation Messages -------------------------------------------- -// ============================================================================ - -// Request to list annotations associated with a data record or collection. -// Reply: NoteDataReply on success, NackError on error -message NoteListBySubjectRequest -{ - required string subject = 1; // Data record or collection ID/alias -} - -// Request to view details of an annotation -// Reply: NoteDataReply on success, NackError on error -message NoteViewRequest -{ - required string id = 1; // Note ID -} - -// Request to create a new annotation and associate with a data record -// or collection. Creator must have READ_REC permission to create an -// annotation, and must be the owner/creator of the subject to activate -// on note creation. -// Reply: NoteDataReply on success, NackError on error -message NoteCreateRequest -{ - required SDMS.NoteType type = 1; // Note type - required string subject = 2; // Subject data record or collection ID/alias - required string title = 3; // Note title - required string comment = 4; // Note comments (text) - required bool activate = 5; // Flag to activate on creation -} - -// Request to update an existing annotation. Only owner/creator of note -// subject may change note title, type, or state. 
-// Reply: NoteDataReply on success, NackError on error -message NoteUpdateRequest -{ - required string id = 1; // Note ID - required string comment = 2; // Comments about update - optional SDMS.NoteType new_type = 3; // Optional new note type - optional SDMS.NoteState new_state = 4; // Optional new note state - optional string new_title = 5; // Optional new note title -} - -// Request to edit an existing comment within a note. Only the author of -// the original comment may update the comment. -// Reply: NoteDataReply on success, NackError on error -message NoteCommentEditRequest -{ - required string id = 1; // Note ID - required uint32 comment_idx = 2; // Index of comment to edit - required string comment = 3; // New comment text -} - -// Reply containing note information and any record/collection updates due to -// side effects. -message NoteDataReply -{ - repeated SDMS.NoteData note = 1; // Note information - repeated SDMS.ListingData update = 2; // Updated data records / collections -} - -// ============================================================================ -// ----------- Task Messages -------------------------------------------------- -// ============================================================================ - -// Request to view a specific task. -// Reply: TaskDataReply on success, NackError on error -message TaskViewRequest -{ - required string task_id = 1; // Task ID -} - -// Request to list tasks for auth client. The 'since' and 'from'/'to' parameters -// are mutually exclusive. 
-// Reply: TaskDataReply on success, NackError on error -message TaskListRequest -{ - optional uint32 since = 1; // Tasks updated since given seconds - optional uint32 from = 2; // Tasks updated starting from timestamp - optional uint32 to = 3; // Tasks updated until timestamp - repeated SDMS.TaskStatus status = 4; // List of status types to return - optional uint32 offset = 5; // Result offset - optional uint32 count = 6; // Result count -} - -// Reply containing detailed information for one or more tasks. -message TaskDataReply -{ - repeated SDMS.TaskData task = 1; // Task information - //optional uint32 offset = 2; // Offset of this result page - //optional uint32 count = 3; // Count of this result page - //optional uint32 total = 4; // Total number of results -} - - -// ============================================================================ -// ----------- Tag Messages --------------------------------------------------- -// ============================================================================ - -// Request to search for a tag against an ngram index of existing tag names. -// Reply: TagDataReply on success, NackError on error -message TagSearchRequest -{ - optional string name = 1; // Partial/full tag name - optional uint32 offset = 2; // Result offset - optional uint32 count = 3; // Result count -} - -// Request to list all existing tags sorted by reference count. 
-// Reply: TagDataReply on success, NackError on error -message TagListByCountRequest -{ - optional uint32 offset = 1; // Result offset - optional uint32 count = 2; // Result count -} - -// Reply containing statistics -message TagDataReply -{ - repeated SDMS.TagData tag = 1; // Tag information - optional uint32 offset = 2; // Offset of this result page - optional uint32 count = 3; // Count of this result page - optional uint32 total = 4; // Total number of results -} - - -// ============================================================================ -// ----------- Schema / Validation Messages ----------------------------------- -// ============================================================================ - -// Request to validate metadata against a given metadata schema. -// Reply: MetadataValidateReply on success, NackError on error -message MetadataValidateRequest -{ - required string metadata = 1; // Metadata as a JSON string - required string sch_id = 2; // Schema ID to validate against -} - -// Reply containing metadata errors -message MetadataValidateReply -{ - optional string errors = 1; // Validation error message -} - -// Request to view a schema. If the resolve option is true, all schema references -// will be loaded and stored in the '_refs' field of the specified schema. -// Reply: SchemaDataReply on success, NackError on error -message SchemaViewRequest -{ - required string id = 1; // Schema ID - optional bool resolve = 2; // Flag to resolve references -} - -// Request to search for schemas. 
-// Reply: SchemaDataReply on success, NackError on error -message SchemaSearchRequest -{ - optional string id = 1; // Partial schema ID (ngram index) - optional string text = 2; // Word or phrase to match in description - optional string owner = 3; // Owner of schema - optional SDMS.SortOption sort = 4; // Sort order - optional bool sort_rev = 5; // Flag to reverse sort order - optional uint32 offset = 6; // Result offset - optional uint32 count = 7; // Result count -} - -// Reply containing detailed information for one or more schemas. -message SchemaDataReply -{ - repeated SDMS.SchemaData schema = 1; // Schema details - optional uint32 offset = 2; // Offset of this result page - optional uint32 count = 3; // Count of this result page - optional uint32 total = 4; // Total number of results -} - -// Request to create a new schema. Only system admins may set the system flag. -// Reply: AckReply on success, NackError on error -message SchemaCreateRequest -{ - required string id = 1; // Schema ID - required string desc = 2; // Description - required bool pub = 3; // Public flag - required bool sys = 4; // System flag - required string def = 5; // Schema definition (JSON schema specification) -} - -// Request to update an existing schema. Updates are not allowed for schemas that -// are in use or referenced by other schemas. -// Reply: AckReply on success, NackError on error -message SchemaUpdateRequest -{ - required string id = 1; // Schema ID - optional string id_new = 2; // Optional new schema ID - optional string desc = 3; // Optional new description - optional bool pub = 4; // Optional new public flag - optional bool sys = 5; // Optional new system flag - optional string def = 6; // Optional new definition -} - -// Request to revise an existing schema. Revision creates a copy of the original -// schema with a new revision number and updated fields. 
-// Reply: AckReply on success, NackError on error -message SchemaReviseRequest -{ - required string id = 1; // Schema ID - optional string desc = 2; // Optional new description - optional bool pub = 3; // Optional new public flag - optional bool sys = 4; // Optional new system flag - optional string def = 5; // Optional new definition -} - - -// Request to delete an existing schema. Deletion is not allowed for schema that -// are in use or are referenced by other schemas. -// Reply: AckReply on success, NackError on error -message SchemaDeleteRequest -{ - required string id = 1; // Schema ID -} - - -// ============================================================================ -// ----------- Catalog / Topic Messages --------------------------------------- -// ============================================================================ - -// Request to list catalog topics. If topic_id is specified, only child topics -// of that topic are returned; otherwise all top-level topics are returned. -// Reply: TopicDataReply on success, NackError on error -message TopicListTopicsRequest -{ - optional string topic_id = 1; // Optional parent topic - optional uint32 offset = 2; // Result offset - optional uint32 count = 3; // Result count -} - -// Request to view a specific topic. -// Reply: TopicDataReply on success, NackError on error -message TopicViewRequest -{ - required string id = 1; // Topic ID -} - -// Request to search for topics by category words/phrase. -// Reply: TopicDataReply on success, NackError on error -message TopicSearchRequest -{ - optional string phrase = 1; // Word/phrase to match -} - -// Reply containing details of one or more topics. 
-message TopicDataReply -{ - repeated TopicData topic = 1; // Topic details - optional uint32 offset = 2; // Offset of this result page - optional uint32 count = 3; // Count of this result page - optional uint32 total = 4; // Total number of results -} - diff --git a/common/proto/common/Version.proto.in b/common/proto/common/Version.proto.in deleted file mode 100644 index b9ebb4c15..000000000 --- a/common/proto/common/Version.proto.in +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto2"; -// WARNING the Version.proto file is generated by CMAKE -// -// DataFed version numbers must be manually updated as needed. -// All minor version numbers should be reset when VER_MAJOR is incremented. -// VER_MAPI_MINOR should be reset to 0 when VER_MAPI_MAJOR is incremented. -// Servers and clients must be interoperable as long as VER_MAJOR and VER_MAPI_MAJOR match - -// Minor version are used to notify admins when local servers need to be updated. -// This is accomplished by sending a version request to the Core server on startup -// and comparing to the local version numbers. 
- -enum Version -{ - option allow_alias = true; // Must be commented out if there are no duplicate values - -// VER_MAJOR = 1; // System MAJOR version, no backward compatibility -// VER_MAPI_MAJOR = 4; // Message API MAJOR version, no backward compatibility -// VER_MAPI_MINOR = 1; // Message API MINOR version, backward compatible -// VER_CORE = 0; // Core server MINOR version, information only -// VER_WEB = 0; // Web server MINOR version, info/notification purposes -// VER_REPO = 0; // Repo server MINOR version, info/notification purposes -// VER_CLIENT_PY = 0; // Python client/api MINOR version, info/notification purposes - - DATAFED_RELEASE_YEAR = @DATAFED_RELEASE_YEAR@; - DATAFED_RELEASE_MONTH = @DATAFED_RELEASE_MONTH@; - DATAFED_RELEASE_DAY = @DATAFED_RELEASE_DAY@; - DATAFED_RELEASE_HOUR = @DATAFED_RELEASE_HOUR@; - DATAFED_RELEASE_MINUTE = @DATAFED_RELEASE_MINUTE@; - - DATAFED_COMMON_PROTOCOL_API_MAJOR = @DATAFED_COMMON_PROTOCOL_API_MAJOR@; - DATAFED_COMMON_PROTOCOL_API_MINOR = @DATAFED_COMMON_PROTOCOL_API_MINOR@; - DATAFED_COMMON_PROTOCOL_API_PATCH = @DATAFED_COMMON_PROTOCOL_API_PATCH@; -} - diff --git a/common/proto3/common/anon/nack_reply.proto b/common/proto3/common/anon/nack_reply.proto index b4a318466..8cb856ac3 100644 --- a/common/proto3/common/anon/nack_reply.proto +++ b/common/proto3/common/anon/nack_reply.proto @@ -9,5 +9,5 @@ option cc_enable_arenas = true; // Error response message NackReply { ErrorCode err_code = 1; - string err_msg = 2; + optional string err_msg = 2; } diff --git a/common/proto3/common/auth/acl_shared_list_request.proto b/common/proto3/common/auth/acl_shared_list_request.proto index e26889812..18867890f 100644 --- a/common/proto3/common/auth/acl_shared_list_request.proto +++ b/common/proto3/common/auth/acl_shared_list_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message ACLSharedListRequest { - bool inc_users = 2; - bool inc_projects = 3; + optional bool inc_users = 2; + optional bool inc_projects = 
3; } diff --git a/common/proto3/common/auth/acl_update_request.proto b/common/proto3/common/auth/acl_update_request.proto index 713cd455f..dc63aeb8f 100644 --- a/common/proto3/common/auth/acl_update_request.proto +++ b/common/proto3/common/auth/acl_update_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message ACLUpdateRequest { string id = 1; - string rules = 2; + optional string rules = 2; } diff --git a/common/proto3/common/auth/check_perms_request.proto b/common/proto3/common/auth/check_perms_request.proto index 731c47107..93eae3b48 100644 --- a/common/proto3/common/auth/check_perms_request.proto +++ b/common/proto3/common/auth/check_perms_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message CheckPermsRequest { string id = 1; - uint32 perms = 2; + optional uint32 perms = 2; } diff --git a/common/proto3/common/auth/coll_create_request.proto b/common/proto3/common/auth/coll_create_request.proto index e24d6fb98..b599e3be5 100644 --- a/common/proto3/common/auth/coll_create_request.proto +++ b/common/proto3/common/auth/coll_create_request.proto @@ -6,9 +6,9 @@ option cc_enable_arenas = true; message CollCreateRequest { string title = 1; - string desc = 2; - string alias = 3; - string parent_id = 4; - string topic = 6; + optional string desc = 2; + optional string alias = 3; + optional string parent_id = 4; + optional string topic = 6; repeated string tags = 7; } diff --git a/common/proto3/common/auth/coll_get_parents_request.proto b/common/proto3/common/auth/coll_get_parents_request.proto index 6a87d2d85..7750dc3ff 100644 --- a/common/proto3/common/auth/coll_get_parents_request.proto +++ b/common/proto3/common/auth/coll_get_parents_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message CollGetParentsRequest { string id = 1; - bool inclusive = 2; + optional bool inclusive = 2; } diff --git a/common/proto3/common/auth/coll_list_published_request.proto b/common/proto3/common/auth/coll_list_published_request.proto index 
90c7ceced..0b2d06895 100644 --- a/common/proto3/common/auth/coll_list_published_request.proto +++ b/common/proto3/common/auth/coll_list_published_request.proto @@ -5,7 +5,7 @@ package SDMS; option cc_enable_arenas = true; message CollListPublishedRequest { - string subject = 1; - uint32 offset = 2; - uint32 count = 3; + optional string subject = 1; + optional uint32 offset = 2; + optional uint32 count = 3; } diff --git a/common/proto3/common/auth/coll_read_request.proto b/common/proto3/common/auth/coll_read_request.proto index 658233744..89b51905a 100644 --- a/common/proto3/common/auth/coll_read_request.proto +++ b/common/proto3/common/auth/coll_read_request.proto @@ -7,6 +7,6 @@ option cc_enable_arenas = true; message CollReadRequest { string id = 1; bool details = 3; - uint32 offset = 4; - uint32 count = 5; + optional uint32 offset = 4; + optional uint32 count = 5; } diff --git a/common/proto3/common/auth/coll_update_request.proto b/common/proto3/common/auth/coll_update_request.proto index 2e2c9bc39..901116e42 100644 --- a/common/proto3/common/auth/coll_update_request.proto +++ b/common/proto3/common/auth/coll_update_request.proto @@ -6,10 +6,10 @@ option cc_enable_arenas = true; message CollUpdateRequest { string id = 1; - string title = 2; - string desc = 3; - string alias = 4; - string topic = 6; + optional string title = 2; + optional string desc = 3; + optional string alias = 4; + optional string topic = 6; repeated string tags = 7; - bool tags_clear = 8; + optional bool tags_clear = 8; } diff --git a/common/proto3/common/auth/data_get_request.proto b/common/proto3/common/auth/data_get_request.proto index 8c582d0b4..44702f797 100644 --- a/common/proto3/common/auth/data_get_request.proto +++ b/common/proto3/common/auth/data_get_request.proto @@ -8,10 +8,10 @@ option cc_enable_arenas = true; message DataGetRequest { repeated string id = 1; - string path = 2; - Encryption encrypt = 3; - bool orig_fname = 4; - bool check = 5; - string collection_id = 6; - string 
collection_type = 7; + optional string path = 2; + optional Encryption encrypt = 3; + optional bool orig_fname = 4; + optional bool check = 5; + optional string collection_id = 6; + optional string collection_type = 7; } diff --git a/common/proto3/common/auth/data_put_request.proto b/common/proto3/common/auth/data_put_request.proto index bde29f1c5..bbc6e36f5 100644 --- a/common/proto3/common/auth/data_put_request.proto +++ b/common/proto3/common/auth/data_put_request.proto @@ -8,10 +8,10 @@ option cc_enable_arenas = true; message DataPutRequest { string id = 1; - string path = 2; - Encryption encrypt = 3; - string ext = 4; - bool check = 5; - string collection_id = 6; - string collection_type = 7; + optional string path = 2; + optional Encryption encrypt = 3; + optional string ext = 4; + optional bool check = 5; + optional string collection_id = 6; + optional string collection_type = 7; } diff --git a/common/proto3/common/auth/generate_credentials_request.proto b/common/proto3/common/auth/generate_credentials_request.proto index 774cd354a..302d50743 100644 --- a/common/proto3/common/auth/generate_credentials_request.proto +++ b/common/proto3/common/auth/generate_credentials_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message GenerateCredentialsRequest { - string domain = 1; - uint32 uid = 2; + optional string domain = 1; + optional uint32 uid = 2; } diff --git a/common/proto3/common/auth/get_perms_request.proto b/common/proto3/common/auth/get_perms_request.proto index 23d6da93a..ec82b26be 100644 --- a/common/proto3/common/auth/get_perms_request.proto +++ b/common/proto3/common/auth/get_perms_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message GetPermsRequest { string id = 1; - uint32 perms = 2; + optional uint32 perms = 2; } diff --git a/common/proto3/common/auth/group_update_request.proto b/common/proto3/common/auth/group_update_request.proto index f6a701134..aea7602b7 100644 --- 
a/common/proto3/common/auth/group_update_request.proto +++ b/common/proto3/common/auth/group_update_request.proto @@ -7,8 +7,8 @@ option cc_enable_arenas = true; message GroupUpdateRequest { string uid = 1; string gid = 2; - string title = 3; - string desc = 4; + optional string title = 3; + optional string desc = 4; repeated string add_uid = 5; repeated string rem_uid = 6; } diff --git a/common/proto3/common/auth/note_update_request.proto b/common/proto3/common/auth/note_update_request.proto index 21c7552fb..d02b86901 100644 --- a/common/proto3/common/auth/note_update_request.proto +++ b/common/proto3/common/auth/note_update_request.proto @@ -10,7 +10,7 @@ option cc_enable_arenas = true; message NoteUpdateRequest { string id = 1; string comment = 2; - NoteType new_type = 3; - NoteState new_state = 4; - string new_title = 5; + optional NoteType new_type = 3; + optional NoteState new_state = 4; + optional string new_title = 5; } diff --git a/common/proto3/common/auth/project_create_request.proto b/common/proto3/common/auth/project_create_request.proto index 7dc6f233d..71ed5f4cd 100644 --- a/common/proto3/common/auth/project_create_request.proto +++ b/common/proto3/common/auth/project_create_request.proto @@ -7,7 +7,7 @@ option cc_enable_arenas = true; message ProjectCreateRequest { string id = 1; string title = 2; - string desc = 3; + optional string desc = 3; repeated string admin = 4; repeated string member = 5; } diff --git a/common/proto3/common/auth/project_get_role_request.proto b/common/proto3/common/auth/project_get_role_request.proto index c6d7f2abc..8854865ea 100644 --- a/common/proto3/common/auth/project_get_role_request.proto +++ b/common/proto3/common/auth/project_get_role_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message ProjectGetRoleRequest { string id = 1; - string subject = 2; + optional string subject = 2; } diff --git a/common/proto3/common/auth/project_list_request.proto b/common/proto3/common/auth/project_list_request.proto 
index a3598fd9b..0c38a5b7c 100644 --- a/common/proto3/common/auth/project_list_request.proto +++ b/common/proto3/common/auth/project_list_request.proto @@ -7,12 +7,12 @@ import "enums/sort_option.proto"; option cc_enable_arenas = true; message ProjectListRequest { - string subject = 1; - bool as_owner = 2; - bool as_admin = 3; - bool as_member = 4; - SortOption sort = 5; - bool sort_rev = 6; - uint32 offset = 7; - uint32 count = 8; + optional string subject = 1; + optional bool as_owner = 2; + optional bool as_admin = 3; + optional bool as_member = 4; + optional SortOption sort = 5; + optional bool sort_rev = 6; + optional uint32 offset = 7; + optional uint32 count = 8; } diff --git a/common/proto3/common/auth/project_update_request.proto b/common/proto3/common/auth/project_update_request.proto index c7be4896a..1573271c7 100644 --- a/common/proto3/common/auth/project_update_request.proto +++ b/common/proto3/common/auth/project_update_request.proto @@ -6,8 +6,8 @@ option cc_enable_arenas = true; message ProjectUpdateRequest { string id = 1; - string title = 2; - string desc = 3; + optional string title = 2; + optional string desc = 3; bool admin_set = 4; repeated string admin = 5; bool member_set = 6; diff --git a/common/proto3/common/auth/query_exec_request.proto b/common/proto3/common/auth/query_exec_request.proto index 84c0fe3a2..a7f62e965 100644 --- a/common/proto3/common/auth/query_exec_request.proto +++ b/common/proto3/common/auth/query_exec_request.proto @@ -6,6 +6,6 @@ option cc_enable_arenas = true; message QueryExecRequest { string id = 1; - uint32 offset = 2; - uint32 count = 3; + optional uint32 offset = 2; + optional uint32 count = 3; } diff --git a/common/proto3/common/auth/query_list_request.proto b/common/proto3/common/auth/query_list_request.proto index fc14850ed..a1b0b3c34 100644 --- a/common/proto3/common/auth/query_list_request.proto +++ b/common/proto3/common/auth/query_list_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = 
true; message QueryListRequest { - uint32 offset = 1; - uint32 count = 2; + optional uint32 offset = 1; + optional uint32 count = 2; } diff --git a/common/proto3/common/auth/query_update_request.proto b/common/proto3/common/auth/query_update_request.proto index 54d3f584a..518a93ce7 100644 --- a/common/proto3/common/auth/query_update_request.proto +++ b/common/proto3/common/auth/query_update_request.proto @@ -8,6 +8,6 @@ option cc_enable_arenas = true; message QueryUpdateRequest { string id = 1; - string title = 2; + optional string title = 2; SearchRequest query = 3; } diff --git a/common/proto3/common/auth/record_alloc_change_request.proto b/common/proto3/common/auth/record_alloc_change_request.proto index b0a96b6dc..ba9a4c5f2 100644 --- a/common/proto3/common/auth/record_alloc_change_request.proto +++ b/common/proto3/common/auth/record_alloc_change_request.proto @@ -7,6 +7,6 @@ option cc_enable_arenas = true; message RecordAllocChangeRequest { repeated string id = 1; string repo_id = 2; - string proj_id = 3; - bool check = 4; + optional string proj_id = 3; + optional bool check = 4; } diff --git a/common/proto3/common/auth/record_create_request.proto b/common/proto3/common/auth/record_create_request.proto index 824e9ff2f..259ed570d 100644 --- a/common/proto3/common/auth/record_create_request.proto +++ b/common/proto3/common/auth/record_create_request.proto @@ -8,17 +8,17 @@ option cc_enable_arenas = true; message RecordCreateRequest { string title = 1; - string desc = 2; + optional string desc = 2; repeated string tags = 3; - string alias = 4; - string metadata = 5; - string parent_id = 6; - bool external = 7; - string source = 8; - string repo_id = 9; - string ext = 10; - bool ext_auto = 11; + optional string alias = 4; + optional string metadata = 5; + optional string parent_id = 6; + optional bool external = 7; + optional string source = 8; + optional string repo_id = 9; + optional string ext = 10; + optional bool ext_auto = 11; repeated DependencySpecData 
deps = 12; - string sch_id = 13; - bool sch_enforce = 14; + optional string sch_id = 13; + optional bool sch_enforce = 14; } diff --git a/common/proto3/common/auth/record_list_by_alloc_request.proto b/common/proto3/common/auth/record_list_by_alloc_request.proto index 68d6d2464..973570e3c 100644 --- a/common/proto3/common/auth/record_list_by_alloc_request.proto +++ b/common/proto3/common/auth/record_list_by_alloc_request.proto @@ -7,6 +7,6 @@ option cc_enable_arenas = true; message RecordListByAllocRequest { string repo = 1; string subject = 2; - uint32 offset = 3; - uint32 count = 4; + optional uint32 offset = 3; + optional uint32 count = 4; } diff --git a/common/proto3/common/auth/record_owner_change_request.proto b/common/proto3/common/auth/record_owner_change_request.proto index a0b03137a..13ffef242 100644 --- a/common/proto3/common/auth/record_owner_change_request.proto +++ b/common/proto3/common/auth/record_owner_change_request.proto @@ -7,7 +7,7 @@ option cc_enable_arenas = true; message RecordOwnerChangeRequest { repeated string id = 1; string coll_id = 2; - string repo_id = 3; + optional string repo_id = 3; string proj_id = 4; - bool check = 5; + optional bool check = 5; } diff --git a/common/proto3/common/auth/record_update_request.proto b/common/proto3/common/auth/record_update_request.proto index b4dfedf29..7fdd216df 100644 --- a/common/proto3/common/auth/record_update_request.proto +++ b/common/proto3/common/auth/record_update_request.proto @@ -8,18 +8,18 @@ option cc_enable_arenas = true; message RecordUpdateRequest { string id = 1; - string title = 2; - string desc = 3; + optional string title = 2; + optional string desc = 3; repeated string tags = 4; - bool tags_clear = 5; - string alias = 6; - string metadata = 7; - bool mdset = 8; - string sch_id = 9; - bool sch_enforce = 10; - string source = 11; - string ext = 12; - bool ext_auto = 13; + optional bool tags_clear = 5; + optional string alias = 6; + optional string metadata = 7; + optional bool 
mdset = 8; + optional string sch_id = 9; + optional bool sch_enforce = 10; + optional string source = 11; + optional string ext = 12; + optional bool ext_auto = 13; repeated DependencySpecData dep_add = 16; repeated DependencySpecData dep_rem = 17; } diff --git a/common/proto3/common/auth/repo_allocation_set_default_request.proto b/common/proto3/common/auth/repo_allocation_set_default_request.proto index 2947fdc43..aa96c0b9f 100644 --- a/common/proto3/common/auth/repo_allocation_set_default_request.proto +++ b/common/proto3/common/auth/repo_allocation_set_default_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message RepoAllocationSetDefaultRequest { string repo = 1; - string subject = 2; + optional string subject = 2; } diff --git a/common/proto3/common/auth/repo_allocation_stats_request.proto b/common/proto3/common/auth/repo_allocation_stats_request.proto index d76e3e651..b5a9cef86 100644 --- a/common/proto3/common/auth/repo_allocation_stats_request.proto +++ b/common/proto3/common/auth/repo_allocation_stats_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message RepoAllocationStatsRequest { string repo = 1; - string subject = 2; + optional string subject = 2; } diff --git a/common/proto3/common/auth/repo_create_request.proto b/common/proto3/common/auth/repo_create_request.proto index e4df95856..7efb3431f 100644 --- a/common/proto3/common/auth/repo_create_request.proto +++ b/common/proto3/common/auth/repo_create_request.proto @@ -7,13 +7,13 @@ option cc_enable_arenas = true; message RepoCreateRequest { string id = 1; string title = 2; - string desc = 3; - string domain = 5; - string path = 6; - string exp_path = 7; - string address = 8; - string endpoint = 9; - string pub_key = 10; + optional string desc = 3; + optional string domain = 5; + optional string path = 6; + optional string exp_path = 7; + optional string address = 8; + optional string endpoint = 9; + optional string pub_key = 10; uint64 capacity = 11; repeated string admin 
= 12; string type = 13; diff --git a/common/proto3/common/auth/repo_list_request.proto b/common/proto3/common/auth/repo_list_request.proto index 7e904a2bc..7d56d9d47 100644 --- a/common/proto3/common/auth/repo_list_request.proto +++ b/common/proto3/common/auth/repo_list_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message RepoListRequest { - bool details = 1; - bool all = 2; + optional bool details = 1; + optional bool all = 2; } diff --git a/common/proto3/common/auth/repo_list_subject_allocations_request.proto b/common/proto3/common/auth/repo_list_subject_allocations_request.proto index c5a3f8baf..23421fe83 100644 --- a/common/proto3/common/auth/repo_list_subject_allocations_request.proto +++ b/common/proto3/common/auth/repo_list_subject_allocations_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message RepoListSubjectAllocationsRequest { - string subject = 1; - bool stats = 2; + optional string subject = 1; + optional bool stats = 2; } diff --git a/common/proto3/common/auth/repo_update_request.proto b/common/proto3/common/auth/repo_update_request.proto index 7218713cd..00b83dd07 100644 --- a/common/proto3/common/auth/repo_update_request.proto +++ b/common/proto3/common/auth/repo_update_request.proto @@ -6,15 +6,15 @@ option cc_enable_arenas = true; message RepoUpdateRequest { string id = 1; - string title = 2; - string desc = 3; - string domain = 5; - string path = 6; - string exp_path = 7; - string address = 8; - string endpoint = 9; - string pub_key = 10; - uint64 capacity = 11; + optional string title = 2; + optional string desc = 3; + optional string domain = 5; + optional string path = 6; + optional string exp_path = 7; + optional string address = 8; + optional string endpoint = 9; + optional string pub_key = 10; + optional uint64 capacity = 11; repeated string admin = 12; string type = 13; } diff --git a/common/proto3/common/auth/repo_view_allocation_request.proto 
b/common/proto3/common/auth/repo_view_allocation_request.proto index 30e244cba..111482905 100644 --- a/common/proto3/common/auth/repo_view_allocation_request.proto +++ b/common/proto3/common/auth/repo_view_allocation_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message RepoViewAllocationRequest { string repo = 1; - string subject = 2; + optional string subject = 2; } diff --git a/common/proto3/common/auth/schema_revise_request.proto b/common/proto3/common/auth/schema_revise_request.proto index 7eb565b7a..02a1a6931 100644 --- a/common/proto3/common/auth/schema_revise_request.proto +++ b/common/proto3/common/auth/schema_revise_request.proto @@ -6,8 +6,8 @@ option cc_enable_arenas = true; message SchemaReviseRequest { string id = 1; - string desc = 2; - bool pub = 3; - bool sys = 4; - string def = 5; + optional string desc = 2; + optional bool pub = 3; + optional bool sys = 4; + optional string def = 5; } diff --git a/common/proto3/common/auth/schema_search_request.proto b/common/proto3/common/auth/schema_search_request.proto index a621e39a4..c88275d15 100644 --- a/common/proto3/common/auth/schema_search_request.proto +++ b/common/proto3/common/auth/schema_search_request.proto @@ -7,11 +7,11 @@ import "enums/sort_option.proto"; option cc_enable_arenas = true; message SchemaSearchRequest { - string id = 1; - string text = 2; - string owner = 3; - SortOption sort = 4; - bool sort_rev = 5; - uint32 offset = 6; - uint32 count = 7; + optional string id = 1; + optional string text = 2; + optional string owner = 3; + optional SortOption sort = 4; + optional bool sort_rev = 5; + optional uint32 offset = 6; + optional uint32 count = 7; } diff --git a/common/proto3/common/auth/schema_update_request.proto b/common/proto3/common/auth/schema_update_request.proto index 82bf40b5f..c02dee5e5 100644 --- a/common/proto3/common/auth/schema_update_request.proto +++ b/common/proto3/common/auth/schema_update_request.proto @@ -6,9 +6,9 @@ option cc_enable_arenas = true; 
message SchemaUpdateRequest { string id = 1; - string id_new = 2; - string desc = 3; - bool pub = 4; - bool sys = 5; - string def = 6; + optional string id_new = 2; + optional string desc = 3; + optional bool pub = 4; + optional bool sys = 5; + optional string def = 6; } diff --git a/common/proto3/common/auth/schema_view_request.proto b/common/proto3/common/auth/schema_view_request.proto index a6518eca2..4b7cb37f5 100644 --- a/common/proto3/common/auth/schema_view_request.proto +++ b/common/proto3/common/auth/schema_view_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message SchemaViewRequest { string id = 1; - bool resolve = 2; + optional bool resolve = 2; } diff --git a/common/proto3/common/auth/search_request.proto b/common/proto3/common/auth/search_request.proto index eaaa72c40..e87533c0e 100644 --- a/common/proto3/common/auth/search_request.proto +++ b/common/proto3/common/auth/search_request.proto @@ -9,21 +9,21 @@ option cc_enable_arenas = true; message SearchRequest { SearchMode mode = 1; - bool published = 2; - string id = 3; - string text = 4; + optional bool published = 2; + optional string id = 3; + optional string text = 4; repeated string tags = 5; repeated string cat_tags = 6; - uint32 from = 7; - uint32 to = 8; - string owner = 9; - string creator = 10; + optional uint32 from = 7; + optional uint32 to = 8; + optional string owner = 9; + optional string creator = 10; repeated string coll = 11; - string sch_id = 12; - string meta = 13; - bool meta_err = 14; - SortOption sort = 15; - bool sort_rev = 16; - uint32 offset = 17; - uint32 count = 18; + optional string sch_id = 12; + optional string meta = 13; + optional bool meta_err = 14; + optional SortOption sort = 15; + optional bool sort_rev = 16; + optional uint32 offset = 17; + optional uint32 count = 18; } diff --git a/common/proto3/common/auth/tag_list_by_count_request.proto b/common/proto3/common/auth/tag_list_by_count_request.proto index 91a684a34..e190f69a1 100644 --- 
a/common/proto3/common/auth/tag_list_by_count_request.proto +++ b/common/proto3/common/auth/tag_list_by_count_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message TagListByCountRequest { - uint32 offset = 1; - uint32 count = 2; + optional uint32 offset = 1; + optional uint32 count = 2; } diff --git a/common/proto3/common/auth/tag_search_request.proto b/common/proto3/common/auth/tag_search_request.proto index fbff65c4b..741cba10c 100644 --- a/common/proto3/common/auth/tag_search_request.proto +++ b/common/proto3/common/auth/tag_search_request.proto @@ -6,6 +6,6 @@ option cc_enable_arenas = true; message TagSearchRequest { string name = 1; - uint32 offset = 2; - uint32 count = 3; + optional uint32 offset = 2; + optional uint32 count = 3; } diff --git a/common/proto3/common/auth/task_list_request.proto b/common/proto3/common/auth/task_list_request.proto index fbf399e05..dae0cc665 100644 --- a/common/proto3/common/auth/task_list_request.proto +++ b/common/proto3/common/auth/task_list_request.proto @@ -7,10 +7,10 @@ import "enums/task_status.proto"; option cc_enable_arenas = true; message TaskListRequest { - uint32 since = 1; - uint32 from = 2; - uint32 to = 3; + optional uint32 since = 1; + optional uint32 from = 2; + optional uint32 to = 3; repeated TaskStatus status = 4; - uint32 offset = 5; - uint32 count = 6; + optional uint32 offset = 5; + optional uint32 count = 6; } diff --git a/common/proto3/common/auth/topic_list_topics_request.proto b/common/proto3/common/auth/topic_list_topics_request.proto index ddadce07a..ef4d5f448 100644 --- a/common/proto3/common/auth/topic_list_topics_request.proto +++ b/common/proto3/common/auth/topic_list_topics_request.proto @@ -5,7 +5,7 @@ package SDMS; option cc_enable_arenas = true; message TopicListTopicsRequest { - string topic_id = 1; - uint32 offset = 2; - uint32 count = 3; + optional string topic_id = 1; + optional uint32 offset = 2; + optional uint32 count = 3; } diff --git 
a/common/proto3/common/auth/user_create_request.proto b/common/proto3/common/auth/user_create_request.proto index 8fe964982..2595e4a82 100644 --- a/common/proto3/common/auth/user_create_request.proto +++ b/common/proto3/common/auth/user_create_request.proto @@ -6,10 +6,10 @@ option cc_enable_arenas = true; message UserCreateRequest { string uid = 1; - string password = 2; + optional string password = 2; string name = 3; string email = 4; repeated string uuid = 5; - string options = 6; + optional string options = 6; string secret = 7; } diff --git a/common/proto3/common/auth/user_find_by_name_uid_request.proto b/common/proto3/common/auth/user_find_by_name_uid_request.proto index 1bccffee8..bc527af10 100644 --- a/common/proto3/common/auth/user_find_by_name_uid_request.proto +++ b/common/proto3/common/auth/user_find_by_name_uid_request.proto @@ -6,6 +6,6 @@ option cc_enable_arenas = true; message UserFindByNameUIDRequest { string name_uid = 1; - uint32 offset = 2; - uint32 count = 3; + optional uint32 offset = 2; + optional uint32 count = 3; } diff --git a/common/proto3/common/auth/user_get_access_token_request.proto b/common/proto3/common/auth/user_get_access_token_request.proto index 813bf255e..c396ca9c7 100644 --- a/common/proto3/common/auth/user_get_access_token_request.proto +++ b/common/proto3/common/auth/user_get_access_token_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message UserGetAccessTokenRequest { - string collection_id = 1; - string collection_type = 2; + optional string collection_id = 1; + optional string collection_type = 2; } diff --git a/common/proto3/common/auth/user_list_all_request.proto b/common/proto3/common/auth/user_list_all_request.proto index 9e409044b..3bc560cf9 100644 --- a/common/proto3/common/auth/user_list_all_request.proto +++ b/common/proto3/common/auth/user_list_all_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message UserListAllRequest { - uint32 offset = 1; - uint32 
count = 2; + optional uint32 offset = 1; + optional uint32 count = 2; } diff --git a/common/proto3/common/auth/user_list_collab_request.proto b/common/proto3/common/auth/user_list_collab_request.proto index 303571242..335ca4f09 100644 --- a/common/proto3/common/auth/user_list_collab_request.proto +++ b/common/proto3/common/auth/user_list_collab_request.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; message UserListCollabRequest { - uint32 offset = 1; - uint32 count = 2; + optional uint32 offset = 1; + optional uint32 count = 2; } diff --git a/common/proto3/common/auth/user_update_request.proto b/common/proto3/common/auth/user_update_request.proto index bfb2db56b..55b550eb6 100644 --- a/common/proto3/common/auth/user_update_request.proto +++ b/common/proto3/common/auth/user_update_request.proto @@ -6,7 +6,7 @@ option cc_enable_arenas = true; message UserUpdateRequest { string uid = 1; - string email = 2; - string password = 3; - string options = 4; + optional string email = 2; + optional string password = 3; + optional string options = 4; } diff --git a/common/proto3/common/auth/user_view_request.proto b/common/proto3/common/auth/user_view_request.proto index d404eaf1b..70cea53da 100644 --- a/common/proto3/common/auth/user_view_request.proto +++ b/common/proto3/common/auth/user_view_request.proto @@ -6,5 +6,5 @@ option cc_enable_arenas = true; message UserViewRequest { string uid = 1; - bool details = 2; + optional bool details = 2; } diff --git a/common/proto3/common/enums/access_token_type.proto b/common/proto3/common/enums/access_token_type.proto index 8b23ac985..81af3bbcc 100644 --- a/common/proto3/common/enums/access_token_type.proto +++ b/common/proto3/common/enums/access_token_type.proto @@ -5,10 +5,10 @@ package SDMS; option cc_enable_arenas = true; enum AccessTokenType { - ACCESS_TOKEN_TYPE_UNSPECIFIED = 0; - ACCESS_TOKEN_TYPE_GENERIC = 1; - ACCESS_TOKEN_TYPE_GLOBUS = 2; - ACCESS_TOKEN_TYPE_GLOBUS_AUTH = 3; - 
ACCESS_TOKEN_TYPE_GLOBUS_TRANSFER = 4; - ACCESS_TOKEN_TYPE_GLOBUS_DEFAULT = 5; + TOKEN_UNSPECIFIED = 0; + GENERIC = 1; + GLOBUS = 2; + GLOBUS_AUTH = 3; + GLOBUS_TRANSFER = 4; + GLOBUS_DEFAULT = 5; } diff --git a/common/proto3/common/enums/encryption.proto b/common/proto3/common/enums/encryption.proto index b843452f8..e9c814a5e 100644 --- a/common/proto3/common/enums/encryption.proto +++ b/common/proto3/common/enums/encryption.proto @@ -5,7 +5,7 @@ package SDMS; option cc_enable_arenas = true; enum Encryption { - ENCRYPTION_NONE = 0; - ENCRYPTION_AVAIL = 1; - ENCRYPTION_FORCE = 2; + ENCRYPT_NONE = 0; + ENCRYPT_AVAIL = 1; + ENCRYPT_FORCE = 2; } diff --git a/common/proto3/common/enums/search_mode.proto b/common/proto3/common/enums/search_mode.proto index cf6b1f42a..f5675ac58 100644 --- a/common/proto3/common/enums/search_mode.proto +++ b/common/proto3/common/enums/search_mode.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; enum SearchMode { - SEARCH_MODE_DATA = 0; - SEARCH_MODE_COLLECTION = 1; + SM_DATA = 0; + SM_COLLECTION = 1; } diff --git a/common/proto3/common/enums/sort_option.proto b/common/proto3/common/enums/sort_option.proto index 11acfb432..1342c507f 100644 --- a/common/proto3/common/enums/sort_option.proto +++ b/common/proto3/common/enums/sort_option.proto @@ -5,10 +5,10 @@ package SDMS; option cc_enable_arenas = true; enum SortOption { - SORT_OPTION_ID = 0; - SORT_OPTION_TITLE = 1; - SORT_OPTION_OWNER = 2; - SORT_OPTION_TIME_CREATE = 3; - SORT_OPTION_TIME_UPDATE = 4; - SORT_OPTION_RELEVANCE = 5; + SORT_ID = 0; + SORT_TITLE = 1; + SORT_OWNER = 2; + SORT_TIME_CREATE = 3; + SORT_TIME_UPDATE = 4; + SORT_RELEVANCE = 5; } diff --git a/common/proto3/common/enums/task_command.proto b/common/proto3/common/enums/task_command.proto index fc4ba3f8e..15fc469a7 100644 --- a/common/proto3/common/enums/task_command.proto +++ b/common/proto3/common/enums/task_command.proto @@ -5,10 +5,10 @@ package SDMS; option cc_enable_arenas = true; enum TaskCommand 
{ - TASK_COMMAND_STOP = 0; - TASK_COMMAND_RAW_DATA_TRANSFER = 1; - TASK_COMMAND_RAW_DATA_DELETE = 2; - TASK_COMMAND_RAW_DATA_UPDATE_SIZE = 3; - TASK_COMMAND_ALLOC_CREATE = 4; - TASK_COMMAND_ALLOC_DELETE = 5; + TC_STOP = 0; + TC_RAW_DATA_TRANSFER = 1; + TC_RAW_DATA_DELETE = 2; + TC_RAW_DATA_UPDATE_SIZE = 3; + TC_ALLOC_CREATE = 4; + TC_ALLOC_DELETE = 5; } diff --git a/common/proto3/common/enums/task_status.proto b/common/proto3/common/enums/task_status.proto index 25973da5a..e42287e44 100644 --- a/common/proto3/common/enums/task_status.proto +++ b/common/proto3/common/enums/task_status.proto @@ -5,9 +5,9 @@ package SDMS; option cc_enable_arenas = true; enum TaskStatus { - TASK_STATUS_BLOCKED = 0; - TASK_STATUS_READY = 1; - TASK_STATUS_RUNNING = 2; - TASK_STATUS_SUCCEEDED = 3; - TASK_STATUS_FAILED = 4; + TS_BLOCKED = 0; + TS_READY = 1; + TS_RUNNING = 2; + TS_SUCCEEDED = 3; + TS_FAILED = 4; } diff --git a/common/proto3/common/enums/task_type.proto b/common/proto3/common/enums/task_type.proto index b6819e403..947ec650f 100644 --- a/common/proto3/common/enums/task_type.proto +++ b/common/proto3/common/enums/task_type.proto @@ -5,14 +5,14 @@ package SDMS; option cc_enable_arenas = true; enum TaskType { - TASK_TYPE_DATA_GET = 0; - TASK_TYPE_DATA_PUT = 1; - TASK_TYPE_DATA_DEL = 2; - TASK_TYPE_REC_CHG_ALLOC = 3; - TASK_TYPE_REC_CHG_OWNER = 4; - TASK_TYPE_REC_DEL = 5; - TASK_TYPE_ALLOC_CREATE = 6; - TASK_TYPE_ALLOC_DEL = 7; - TASK_TYPE_USER_DEL = 8; - TASK_TYPE_PROJ_DEL = 9; + TT_DATA_GET = 0; + TT_DATA_PUT = 1; + TT_DATA_DEL = 2; + TT_REC_CHG_ALLOC = 3; + TT_REC_CHG_OWNER = 4; + TT_REC_DEL = 5; + TT_ALLOC_CREATE = 6; + TT_ALLOC_DEL = 7; + TT_USER_DEL = 8; + TT_PROJ_DEL = 9; } diff --git a/common/proto3/common/messages/group_data.proto b/common/proto3/common/messages/group_data.proto index 26773a99c..393d80fda 100644 --- a/common/proto3/common/messages/group_data.proto +++ b/common/proto3/common/messages/group_data.proto @@ -7,7 +7,7 @@ option cc_enable_arenas = true; message 
GroupData { string uid = 1; string gid = 2; - string title = 3; - string desc = 4; + optional string title = 3; + optional string desc = 4; repeated string member = 5; } diff --git a/common/source/Frame.cpp b/common/source/Frame.cpp index 5f248143a..cf3ff8474 100644 --- a/common/source/Frame.cpp +++ b/common/source/Frame.cpp @@ -24,11 +24,12 @@ namespace g_constants = constants::message::google; * * zmq_msg_init_size( &zmq_msg, 8 ); **/ +// Frame.cpp + void FrameConverter::copy(CopyDirection direction, zmq_msg_t &zmq_msg, Frame &frame) { if (direction == CopyDirection::FROM_FRAME) { if (zmq_msg_size(&zmq_msg) != sizeof(Frame)) { - EXCEPT_PARAM( 1, "Unable to copy frame to zmq_msg sizes are inconsistent Frame: " << sizeof(Frame) << " zmq_msg " << zmq_msg_size(&zmq_msg)); @@ -36,8 +37,7 @@ void FrameConverter::copy(CopyDirection direction, zmq_msg_t &zmq_msg, unsigned char *msg_frame_allocation = (unsigned char *)zmq_msg_data(&zmq_msg); *((uint32_t *)msg_frame_allocation) = htonl(frame.size); - *(msg_frame_allocation + 4) = frame.proto_id; - *(msg_frame_allocation + 5) = frame.msg_id; + *((uint16_t *)(msg_frame_allocation + 4)) = htons(frame.msg_type); *((uint16_t *)(msg_frame_allocation + 6)) = htons(frame.context); } else { // TO_FRAME if (zmq_msg_size(&zmq_msg) != sizeof(Frame)) { @@ -48,8 +48,7 @@ void FrameConverter::copy(CopyDirection direction, zmq_msg_t &zmq_msg, unsigned char *msg_frame_allocation = (unsigned char *)zmq_msg_data(&zmq_msg); frame.size = ntohl(*((uint32_t *)msg_frame_allocation)); - frame.proto_id = *(msg_frame_allocation + 4); - frame.msg_id = *(msg_frame_allocation + 5); + frame.msg_type = ntohs(*((uint16_t *)(msg_frame_allocation + 4))); frame.context = ntohs(*((uint16_t *)(msg_frame_allocation + 6))); } } @@ -58,9 +57,7 @@ void FrameConverter::copy(CopyDirection direction, IMessage &msg, const Frame &frame) { if (direction == CopyDirection::FROM_FRAME) { msg.set(g_constants::FRAME_SIZE, frame.size); - msg.set(g_constants::PROTO_ID, 
frame.proto_id); - msg.set(g_constants::MSG_ID, frame.msg_id); - msg.set(g_constants::MSG_TYPE, frame.getMsgType()); + msg.set(g_constants::MSG_TYPE, frame.msg_type); msg.set(g_constants::CONTEXT, frame.context); } else { EXCEPT(1, "Unsupported copy direction for FrameConverter working on " @@ -71,9 +68,7 @@ void FrameConverter::copy(CopyDirection direction, IMessage &msg, Frame FrameFactory::create(::google::protobuf::Message &a_msg, ProtoBufMap &proto_map) { Frame frame; - auto msg_type = proto_map.getMessageType(a_msg); - frame.proto_id = msg_type >> 8; - frame.msg_id = msg_type & 0xFF; + frame.msg_type = proto_map.getMessageType(a_msg); frame.size = a_msg.ByteSizeLong(); return frame; } @@ -88,21 +83,13 @@ Frame FrameFactory::create(const IMessage &msg) { "constant is not defined cannot create Frame from IMessage, missing: " << g_constants::FRAME_SIZE); } - if (msg.exists(g_constants::PROTO_ID)) { - frame.proto_id = std::get(msg.get(g_constants::PROTO_ID)); - } else { - EXCEPT_PARAM( - 1, - "constant is not defined cannot create Frame from IMessage, missing: " - << g_constants::PROTO_ID); - } - if (msg.exists(g_constants::MSG_ID)) { - frame.msg_id = std::get(msg.get(g_constants::MSG_ID)); + if (msg.exists(g_constants::MSG_TYPE)) { + frame.msg_type = std::get(msg.get(g_constants::MSG_TYPE)); } else { EXCEPT_PARAM( 1, "constant is not defined cannot create Frame from IMessage, missing: " - << g_constants::MSG_ID); + << g_constants::MSG_TYPE); } if (msg.exists(g_constants::CONTEXT)) { frame.context = std::get(msg.get(g_constants::CONTEXT)); @@ -117,7 +104,6 @@ Frame FrameFactory::create(const IMessage &msg) { Frame FrameFactory::create(zmq_msg_t &zmq_msg) { Frame frame; - // No need for conversion if the message size is 0 just use default frame if (zmq_msg_size(&zmq_msg) > 0) { FrameConverter converter; converter.copy(FrameConverter::CopyDirection::TO_FRAME, zmq_msg, frame); diff --git a/common/source/Frame.hpp b/common/source/Frame.hpp index 58a0aa6b7..aaa263ac4 
100644 --- a/common/source/Frame.hpp +++ b/common/source/Frame.hpp @@ -1,3 +1,14 @@ +/** + * @file Frame.hpp + * @brief Wire-level frame header and conversion utilities for the SDMS + * messaging transport layer. + * + * Defines the fixed-size Frame header that prefixes every message on the wire, + * along with FrameConverter (for copying between Frame and ZMQ/IMessage + * representations) and FrameFactory (for constructing Frame headers from + * various message sources). + */ + #ifndef FRAME_HPP #define FRAME_HPP #pragma once @@ -16,45 +27,119 @@ namespace SDMS { // Forward declarations class ProtoBufMap; +/** + * @struct Frame + * @brief Fixed-size 8-byte header that precedes every message on the wire. + * + * Contains the serialized payload size, the message type identifier (envelope + * field number), and an optional application-defined context value. Fields are + * converted to/from network byte order at the ZMQ serialization boundary. + */ struct Frame { + uint32_t size = 0; ///< Size of the serialized payload in bytes. + uint16_t msg_type = 0; ///< Message type ID (envelope field number). + uint16_t context = 0; ///< Optional application-defined context value. - uint32_t size = 0; ///< Size of buffer in bytes - uint8_t proto_id = - 0; ///< Protocol ID (defined by Protocol enum in proto file) - uint8_t msg_id = 0; ///< Message ID (defined by alphabetical order of message - ///< names in proto file) - uint16_t context = 0; ///< Optional context value - + /** + * @brief Resets all fields to zero. + */ void clear() { size = 0; - proto_id = 0; - msg_id = 0; + msg_type = 0; context = 0; } - - /// Message type is 16 bits with protocol ID as the upper 8 bits and message - /// ID as the lower 8 bits - inline uint16_t getMsgType() const { - return (((uint16_t)proto_id) << 8) | msg_id; - } }; +/** + * @class FrameConverter + * @brief Copies data between Frame headers and ZMQ or IMessage representations. + * + * Provides bidirectional conversion for ZMQ messages. 
The IMessage overload + * only supports the FROM_FRAME direction (Frame → IMessage); the reverse + * is unsupported and will throw. + */ class FrameConverter { public: - /** - * Make sure that zmq_msg_init is not called on this message before it - * is passed in. - **/ - enum class CopyDirection { TO_FRAME, FROM_FRAME }; + /** @brief Specifies the direction of a copy operation. */ + enum class CopyDirection { + TO_FRAME, ///< Deserialize: copy from source into Frame. + FROM_FRAME ///< Serialize: copy from Frame into destination. + }; + /** + * @brief Copies between a raw ZMQ message buffer and a Frame. + * + * The ZMQ message must be pre-initialized and sized to exactly 8 bytes + * (sizeof(Frame)) before calling. For the FROM_FRAME direction, use: + * @code + * zmq_msg_init_size(&zmq_msg, 8); + * @endcode + * + * @param copy Direction of the copy operation. + * @param zmq_msg Pre-initialized ZMQ message of exactly 8 bytes. + * @param frame Frame to populate (TO_FRAME) or read from (FROM_FRAME). + * + * @throws TraceException if zmq_msg size != sizeof(Frame). + */ void copy(CopyDirection copy, zmq_msg_t &zmq_msg, Frame &frame); + + /** + * @brief Copies frame fields from a Frame into an IMessage. + * + * @note Only CopyDirection::FROM_FRAME is supported. Passing TO_FRAME + * will throw. + * + * @param copy Must be CopyDirection::FROM_FRAME. + * @param msg IMessage to populate with frame metadata. + * @param frame Source Frame to read from. + * + * @throws TraceException if direction is TO_FRAME. + */ void copy(CopyDirection copy, IMessage &msg, const Frame &frame); }; +/** + * @class FrameFactory + * @brief Constructs Frame headers from various message representations. + * + * Each overload computes the payload size and resolves the message type ID + * appropriate to the source type. The context field is only populated when + * constructing from an IMessage that carries it. 
+ */ class FrameFactory { public: + /** + * @brief Creates a Frame from a protobuf Message, resolving the type ID + * via ProtoBufMap. + * + * The context field is left at its default (0). + * + * @param a_msg Protobuf message (ByteSizeLong() determines frame size). + * @param proto_map Registry used to look up the message type ID. + * @return A Frame with size and msg_type populated. + */ Frame create(::google::protobuf::Message &a_msg, ProtoBufMap &proto_map); + + /** + * @brief Creates a Frame by extracting metadata from an IMessage. + * + * All three fields (FRAME_SIZE, MSG_TYPE, CONTEXT) must be present in the + * IMessage or the call will throw. + * + * @param msg The IMessage containing frame metadata. + * @return A fully populated Frame. + * @throws TraceException if any required field is missing from the IMessage. + */ Frame create(const IMessage &msg); + + /** + * @brief Creates a Frame by deserializing a raw ZMQ message. + * + * Returns a zeroed Frame if the ZMQ message is empty. + * + * @param zmq_msg Raw ZMQ message containing serialized frame bytes. + * @return The deserialized Frame, or a zeroed Frame if empty. 
+ */ Frame create(zmq_msg_t &zmq_msg); }; diff --git a/common/source/ProtoBufFactory.cpp b/common/source/ProtoBufFactory.cpp index 6580f0ccd..99a7a7329 100644 --- a/common/source/ProtoBufFactory.cpp +++ b/common/source/ProtoBufFactory.cpp @@ -2,8 +2,7 @@ #include "ProtoBufFactory.hpp" // Local public includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" #include "common/TraceException.hpp" // Standard includes @@ -11,24 +10,19 @@ namespace SDMS { -ProtoBufFactory::ProtoBufFactory() { - Anon::Protocol_descriptor(); - Auth::Protocol_descriptor(); - m_factory = ::google::protobuf::MessageFactory::generated_factory(); -} - -std::unique_ptr<::google::protobuf::Message> -ProtoBufFactory::create(uint16_t desc_type) { - const ::google::protobuf::Descriptor *msg_descriptor = - m_proto_map.getDescriptorType(desc_type); - return create(msg_descriptor); -} - -// https://stackoverflow.com/questions/29960871/protobuf-message-object-creation-by-name -std::unique_ptr<::google::protobuf::Message> -ProtoBufFactory::create(const ::google::protobuf::Descriptor *msg_descriptor) { - const ::google::protobuf::Message *prototype_msg = - m_factory->GetPrototype(msg_descriptor); +ProtoBufFactory::ProtoBufFactory() {} + +std::unique_ptr ProtoBufFactory::create(uint16_t desc_type) { + const google::protobuf::Descriptor* msg_descriptor = + m_proto_map.getDescriptorType(desc_type); + return create(msg_descriptor); + } + +std::unique_ptr<::google::protobuf::Message> + ProtoBufFactory::create(const ::google::protobuf::Descriptor* msg_descriptor) { + const google::protobuf::Message* prototype_msg = + google::protobuf::MessageFactory::generated_factory() + ->GetPrototype(msg_descriptor); if (prototype_msg == nullptr) { EXCEPT(1, "Cannot create prototype message from message descriptor"); @@ -41,6 +35,7 @@ ProtoBufFactory::create(const ::google::protobuf::Descriptor *msg_descriptor) { } return 
std::unique_ptr<::google::protobuf::Message>(mutable_msg); -} + + } } // namespace SDMS diff --git a/common/source/ProtoBufFactory.hpp b/common/source/ProtoBufFactory.hpp index 444d5a26f..f6766b8e0 100644 --- a/common/source/ProtoBufFactory.hpp +++ b/common/source/ProtoBufFactory.hpp @@ -7,8 +7,7 @@ #include "common/TraceException.hpp" // Local protobuf includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -17,13 +16,11 @@ namespace SDMS { class ProtoBufFactory { ProtoBufMap m_proto_map; - ::google::protobuf::MessageFactory *m_factory; public: ProtoBufFactory(); std::unique_ptr<::google::protobuf::Message> create(uint16_t desc_type); - std::unique_ptr<::google::protobuf::Message> - create(const ::google::protobuf::Descriptor *msg_descriptor); + std::unique_ptr<::google::protobuf::Message> create(const ::google::protobuf::Descriptor *msg_descriptor); }; } // namespace SDMS diff --git a/common/source/ProtoBufMap.cpp b/common/source/ProtoBufMap.cpp index 6c6fcf58b..731009370 100644 --- a/common/source/ProtoBufMap.cpp +++ b/common/source/ProtoBufMap.cpp @@ -1,8 +1,7 @@ // Local public includes #include "common/ProtoBufMap.hpp" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" #include "common/TraceException.hpp" // Third party includes @@ -13,135 +12,107 @@ namespace proto = ::google::protobuf; namespace SDMS { -ProtoBufMap::ProtoBufMap() { - // These two code blocks should be templatized to make them DRY - - { - auto a_enum_desc = Anon::Protocol_descriptor(); - if (a_enum_desc->name() != "Protocol") - EXCEPT(EC_PROTO_INIT, "Must register with Protocol EnumDescriptor."); - - const proto::FileDescriptor *file = a_enum_desc->file(); - if (!file) - EXCEPT(EC_PROTO_INIT, - "Failed to acquire protocol buffer file descriptor."); - - const proto::EnumValueDescriptor *val_desc = - a_enum_desc->FindValueByName("ID"); - if (!val_desc) - 
EXCEPT(EC_PROTO_INIT, "Protocol enum missing required ID field."); - - uint16_t id = val_desc->number(); - // std::cout << __FILE__ << ":" << __LINE__ << " PROTOCOL id is " << id << - // std::endl; - m_file_descriptor_map[id] = file; - - int count = file->message_type_count(); - uint16_t msg_type = id << 8; - - for (int i = 0; i < count; i++, msg_type++) { - const proto::Descriptor *desc = file->message_type(i); - m_descriptor_map[msg_type] = desc; - // Register Message types from Anon - m_msg_type_map[desc] = msg_type; - } - m_protocol_ids[MessageProtocol::GOOGLE_ANONONYMOUS] = id; - } - { - auto a_enum_desc = Auth::Protocol_descriptor(); - if (a_enum_desc->name() != "Protocol") - EXCEPT(EC_PROTO_INIT, "Must register with Protocol EnumDescriptor."); - - const proto::FileDescriptor *file = a_enum_desc->file(); - if (!file) - EXCEPT(EC_PROTO_INIT, - "Failed to acquire protocol buffer file descriptor."); - - const proto::EnumValueDescriptor *val_desc = - a_enum_desc->FindValueByName("ID"); - if (!val_desc) - EXCEPT(EC_PROTO_INIT, "Protocol enum missing required ID field."); - - uint16_t id = val_desc->number(); - // std::cout << "PROTOCOL id is " << id << std::endl; - // std::cout << __FILE__ << ":" << __LINE__ << " PROTOCOL id is " << id << - // std::endl; - m_file_descriptor_map[id] = file; - - int count = file->message_type_count(); - uint16_t msg_type = id << 8; - - for (int i = 0; i < count; i++, msg_type++) { - const proto::Descriptor *desc = file->message_type(i); - m_descriptor_map[msg_type] = desc; - m_msg_type_map[desc] = msg_type; +ProtoBufMap::ProtoBufMap() {} + +std::unique_ptr +ProtoBufMap::wrapInEnvelope(const proto::Message& inner) const { + // If already an Envelope, return a copy — no double-wrapping + const auto* env = dynamic_cast(&inner); + if (env) { + return std::make_unique(*env); } - m_protocol_ids[MessageProtocol::GOOGLE_AUTHORIZED] = id; - } -} -uint16_t ProtoBufMap::getMessageType(proto::Message &a_msg) { - const proto::Descriptor *desc = 
a_msg.GetDescriptor(); - if (m_msg_type_map.count(desc) == 0) { - EXCEPT_PARAM(EC_INVALID_PARAM, - "Unknown descriptor encountered: " << desc->name()); - } - return m_msg_type_map.at(desc); + uint16_t field_number = getMessageType(inner); + auto envelope = std::make_unique(); + const auto* field_desc = + envelope->GetDescriptor()->FindFieldByNumber(field_number); + if (!field_desc) { + EXCEPT_PARAM(EC_INVALID_PARAM, + "Cannot wrap message in envelope: no field number " + << field_number << " for type " + << inner.GetDescriptor()->name()); + } + envelope->GetReflection() + ->MutableMessage(envelope.get(), field_desc) + ->CopyFrom(inner); + return envelope; } -std::string ProtoBufMap::toString(uint16_t msg_type) const { - if (m_descriptor_map.count(msg_type)) { - return m_descriptor_map.at(msg_type)->name(); - } - EXCEPT_PARAM(1, "Provided message type is unknown cannot retrieve name."); +std::unique_ptr +ProtoBufMap::unwrapFromEnvelope(SDMS::Envelope& envelope) const { + uint16_t msg_type = getMessageType(envelope); + const auto* field_desc = + envelope.GetDescriptor()->FindFieldByNumber(msg_type); + if (!field_desc) { + EXCEPT_PARAM(EC_INVALID_PARAM, + "Cannot unwrap envelope: unknown field number " + << msg_type); + } + // ReleaseMessage transfers ownership out of the envelope + return std::unique_ptr( + envelope.GetReflection()->ReleaseMessage(&envelope, field_desc)); } -uint16_t ProtoBufMap::getMessageType(uint8_t a_proto_id, - const std::string &a_message_name) { +uint16_t ProtoBufMap::getMessageType(const ::google::protobuf::Message& msg) const { + // If it's an Envelope, use the envelope method + const SDMS::Envelope* env = dynamic_cast(&msg); + if (env) { + + const auto* reflection = env->GetReflection(); + const auto* descriptor = env->GetDescriptor(); + + for (int i = 0; i < descriptor->field_count(); ++i) { + const auto* field = descriptor->field(i); + if (field->type() == proto::FieldDescriptor::TYPE_MESSAGE) { + if (reflection->HasField(*env, field)) { 
+ return static_cast(field->number()); + } + } + } - // std::cout << "PROTOCOL id is " << a_proto_id << std::endl; - // std::cout << __FILE__ << ":" << __LINE__ << " PROTOCOL id is " << - // static_cast(a_proto_id) << std::endl; - if (m_file_descriptor_map.count(a_proto_id) == 0) { - EXCEPT_PARAM(EC_INVALID_PARAM, - "Protocol ID " << a_proto_id << " has not been registered."); } + + // Otherwise, look up by message type name + const auto* msg_desc = msg.GetDescriptor(); + return getMessageType(msg_desc->name()); +} - const proto::Descriptor *desc = m_file_descriptor_map.at(a_proto_id) - ->FindMessageTypeByName(a_message_name); - if (!desc) - EXCEPT_PARAM(EC_PROTO_INIT, "Could not find specified message: " - << a_message_name << " for protocol: " - << (unsigned int)a_proto_id); - - if (m_msg_type_map.count(desc) == 0) { - EXCEPT_PARAM(EC_INVALID_PARAM, "Message name \"" - << a_message_name - << "\" is not registered with protocol " - << a_proto_id); - } +const proto::Descriptor* ProtoBufMap::getDescriptorType(uint16_t field_number) const { + const auto* envelope_desc = SDMS::Envelope::descriptor(); + const auto* field = envelope_desc->FindFieldByNumber(field_number); + if (field && field->type() == proto::FieldDescriptor::TYPE_MESSAGE) { + return field->message_type(); + } + return nullptr; +} - return m_msg_type_map.at(desc); +uint16_t ProtoBufMap::getMessageType(const std::string& message_name) const { + const auto* envelope_desc = SDMS::Envelope::descriptor(); + + for (int i = 0; i < envelope_desc->field_count(); ++i) { + const auto* field = envelope_desc->field(i); + if (field->type() == proto::FieldDescriptor::TYPE_MESSAGE) { + if (field->message_type()->name() == message_name) { + return static_cast(field->number()); + } + } + } + + EXCEPT_PARAM(EC_INVALID_PARAM, + "Message name \"" << message_name << "\" not found in Envelope"); } -const proto::Descriptor * -ProtoBufMap::getDescriptorType(uint16_t message_type) const { - if 
(m_descriptor_map.count(message_type)) { - return m_descriptor_map.at(message_type); - } else { - EXCEPT_PARAM(EC_PROTO_INIT, - "Descriptor type mapping failed, unregistered message type " - << message_type); - } +std::string ProtoBufMap::toString(uint16_t field_number) const { + const auto* desc = getDescriptorType(field_number); + if (desc) { + return desc->name(); + } + return "Unknown(" + std::to_string(field_number) + ")"; } -uint8_t ProtoBufMap::getProtocolID(MessageProtocol msg_protocol) const { - if (m_protocol_ids.count(msg_protocol)) { - return static_cast(m_protocol_ids.at(msg_protocol)); - } else { - EXCEPT( - 1, - "Unsupported MessageProtocol specified, cannot map to a protocol id"); - } +bool ProtoBufMap::exists(uint16_t field_number) const { + return getDescriptorType(field_number) != nullptr; } + + } // namespace SDMS diff --git a/common/source/Util.cpp b/common/source/Util.cpp index 1170e265d..419f8b5f7 100644 --- a/common/source/Util.cpp +++ b/common/source/Util.cpp @@ -1,6 +1,5 @@ // Local public includes #include "common/Util.hpp" -#include "common/SDMS.pb.h" #include "common/TraceException.hpp" // Third party includes diff --git a/common/source/communicators/ZeroMQCommunicator.cpp b/common/source/communicators/ZeroMQCommunicator.cpp index c2d402fe4..f5f3e2e45 100644 --- a/common/source/communicators/ZeroMQCommunicator.cpp +++ b/common/source/communicators/ZeroMQCommunicator.cpp @@ -336,13 +336,9 @@ void receiveFrame(IMessage &msg, void *incoming_zmq_socket, void sendFrame(IMessage &msg, void *outgoing_zmq_socket) { zmq_msg_t zmq_msg; zmq_msg_init_size(&zmq_msg, 8); - // WARNING do not call zmq_msg_init it is called in copy method - // this is a code smell and should be fixed in the future FrameFactory factory; Frame frame = factory.create(msg); FrameConverter converter; - // Will call zmq_msg_init and create space for 8 bytes - // Convert host binary to network (endian) format converter.copy(FrameConverter::CopyDirection::FROM_FRAME, zmq_msg, 
frame); int number_of_bytes = @@ -374,7 +370,6 @@ void receiveBody(IMessage &msg, Buffer &buffer, ProtoBufFactory &factory, << frame_size << " received " << number_of_bytes); } - // Only set payload if there is a payload if (frame_size > 0) { if (zmq_msg_size(&zmq_msg) != frame_size) { @@ -384,20 +379,20 @@ void receiveBody(IMessage &msg, Buffer &buffer, ProtoBufFactory &factory, << ", got: " << zmq_msg_size(&zmq_msg)); } + // Deserialize wire bytes into Envelope copyToBuffer(buffer, zmq_msg_data(&zmq_msg), frame_size); - uint16_t desc_type = std::get(msg.get(MSG_TYPE)); - std::unique_ptr payload = factory.create(desc_type); - if (payload == nullptr) { - zmq_msg_close(&zmq_msg); - EXCEPT(1, "No payload was assigned something is wrong"); - } - copyFromBuffer(payload.get(), buffer); - msg.setPayload(std::move(payload)); - } else { + SDMS::Envelope envelope; + copyFromBuffer(&envelope, buffer); + + // Extract inner message from Envelope + ProtoBufMap proto_map; + std::unique_ptr inner = + proto_map.unwrapFromEnvelope(envelope); - // Even if the frame has 0 size it does not mean it is not a legitimate - // message some messages have zero size but are still legitimate such - // as a NACK + msg.setPayload(std::move(inner)); + + } else { + // Zero-size: no envelope on the wire. Frame msg_type identifies it. 
uint16_t msg_type = std::get(msg.get(MSG_TYPE)); ProtoBufMap proto_map; @@ -429,29 +424,31 @@ void sendBody(IMessage &msg, Buffer &buffer, void *outgoing_zmq_socket) { uint32_t frame_size = std::get(msg.get(FRAME_SIZE)); if (frame_size > 0) { zmq_msg_t zmq_msg; - zmq_msg_init_size(&zmq_msg, frame_size); - proto::Message *payload; + proto::Message *inner = nullptr; try { - payload = std::get(msg.getPayload()); + inner = std::get(msg.getPayload()); } catch (std::bad_variant_access const &ex) { EXCEPT(1, ex.what()); } - if (payload) { - auto size = payload->ByteSizeLong(); + if (inner) { + ProtoBufMap proto_map; + auto envelope = proto_map.wrapInEnvelope(*inner); + + auto size = envelope->ByteSizeLong(); if (size != frame_size) { zmq_msg_close(&zmq_msg); - EXCEPT_PARAM(1, "Frame and message sizes differ message size: " + EXCEPT_PARAM(1, "Frame and envelope sizes differ. Envelope size: " << size << " frame size: " << frame_size); } - copyToBuffer(buffer, payload, size); + copyToBuffer(buffer, envelope.get(), size); copyFromBuffer(zmq_msg_data(&zmq_msg), buffer); int number_of_bytes = 0; - if ((number_of_bytes = zmq_msg_send(&zmq_msg, outgoing_zmq_socket, 0)) < - 0) { + if ((number_of_bytes = + zmq_msg_send(&zmq_msg, outgoing_zmq_socket, 0)) < 0) { zmq_msg_close(&zmq_msg); EXCEPT(1, "zmq_msg_send (body) failed."); } @@ -462,7 +459,6 @@ void sendBody(IMessage &msg, Buffer &buffer, void *outgoing_zmq_socket) { zmq_msg_close(&zmq_msg); } else { - sendFinalDelimiter(outgoing_zmq_socket); } } else { diff --git a/common/source/communicators/ZeroMQCommunicator.hpp b/common/source/communicators/ZeroMQCommunicator.hpp index ac131d5b1..8c8130608 100644 --- a/common/source/communicators/ZeroMQCommunicator.hpp +++ b/common/source/communicators/ZeroMQCommunicator.hpp @@ -4,9 +4,9 @@ // Local private includes #include "../Buffer.hpp" +#include "../ProtoBufFactory.hpp" // Local public includes -#include "../ProtoBufFactory.hpp" #include "common/DynaLog.hpp" #include 
"common/ICommunicator.hpp" #include "common/IMessage.hpp" diff --git a/common/source/communicators/ZeroMQCommunicatorSecure.cpp b/common/source/communicators/ZeroMQCommunicatorSecure.cpp index b6e59b871..4a5608e4e 100644 --- a/common/source/communicators/ZeroMQCommunicatorSecure.cpp +++ b/common/source/communicators/ZeroMQCommunicatorSecure.cpp @@ -1,6 +1,5 @@ // Local private includes #include "ZeroMQCommunicatorSecure.hpp" -#include "ProtoBufFactory.hpp" #include "support/zeromq/Context.hpp" #include "support/zeromq/SocketTranslator.hpp" diff --git a/common/source/messages/GoogleProtoMessage.cpp b/common/source/messages/GoogleProtoMessage.cpp index d7daafc70..fd237adee 100644 --- a/common/source/messages/GoogleProtoMessage.cpp +++ b/common/source/messages/GoogleProtoMessage.cpp @@ -21,8 +21,6 @@ namespace SDMS { GoogleProtoMessage::GoogleProtoMessage() { m_dyn_attributes[constants::message::google::FRAME_SIZE] = (uint32_t)0; - m_dyn_attributes[constants::message::google::PROTO_ID] = (uint8_t)0; - m_dyn_attributes[constants::message::google::MSG_ID] = (uint8_t)0; m_dyn_attributes[constants::message::google::MSG_TYPE] = (uint16_t)0; m_dyn_attributes[constants::message::google::CONTEXT] = (uint16_t)0; @@ -42,25 +40,30 @@ bool GoogleProtoMessage::exists(const std::string &attribute_type) const { /** * Setters **/ + void GoogleProtoMessage::setPayload( std::variant, std::string> payload) { if (std::holds_alternative>( payload)) { - // Because the frame depends on the payload, the frame needs to be created - // here + auto &inner = + std::get>(payload); + + // msg_type come from the inner message's + // envelope field number — this is already correct FrameFactory frame_factory; - Frame frame = frame_factory.create( - *std::get>(payload), - m_proto_map); + Frame frame = frame_factory.create(*inner, m_proto_map); + + // FRAME_SIZE must reflect what goes on the wire: the Envelope, + // not the inner message + auto temp_envelope = m_proto_map.wrapInEnvelope(*inner); + 
frame.size = static_cast(temp_envelope->ByteSizeLong()); + m_dyn_attributes[constants::message::google::FRAME_SIZE] = frame.size; - m_dyn_attributes[constants::message::google::PROTO_ID] = frame.proto_id; - m_dyn_attributes[constants::message::google::MSG_ID] = frame.msg_id; - m_dyn_attributes[constants::message::google::MSG_TYPE] = frame.getMsgType(); - // Do not overload the context because this is not associated with the - // message payload but with the response - m_payload = std::move( - std::get>(payload)); + m_dyn_attributes[constants::message::google::MSG_TYPE] = frame.msg_type; + + // Store the INNER message, not the envelope + m_payload = std::move(inner); } else { EXCEPT(1, "Attempt to add unsupported payload to GoogleProtoMessage."); } diff --git a/common/source/servers/Proxy.cpp b/common/source/servers/Proxy.cpp index e7b4cc5a7..000229446 100644 --- a/common/source/servers/Proxy.cpp +++ b/common/source/servers/Proxy.cpp @@ -7,10 +7,6 @@ #include "common/ICommunicator.hpp" #include "common/TraceException.hpp" -// Proto file includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" - // Standard includes #include #include diff --git a/common/tests/security/tcp_secure/test_tcp_secure_client.cpp b/common/tests/security/tcp_secure/test_tcp_secure_client.cpp index 2568c0a43..1f2a9f009 100644 --- a/common/tests/security/tcp_secure/test_tcp_secure_client.cpp +++ b/common/tests/security/tcp_secure/test_tcp_secure_client.cpp @@ -8,9 +8,7 @@ #include "common/TraceException.hpp" // Proto file includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" // Third party includes #include @@ -155,11 +153,12 @@ int main(int a_argc, char **a_argv) { uint16_t context = 0; msg_from_client->set(constants::message::google::CONTEXT, context); - auto auth_by_token_req = - std::make_unique(); + auto envelope = + std::make_unique(); + auto auth_by_token_req = 
envelope->mutable_authenticate_by_token_request(); auth_by_token_req->set_token(token); - msg_from_client->setPayload(std::move(auth_by_token_req)); + msg_from_client->setPayload(std::move(envelope)); client->send(*msg_from_client); } std::cout << client->id() << " Message sent..." << std::endl; @@ -192,26 +191,34 @@ int main(int a_argc, char **a_argv) { auto response_google_msg_ptr = std::get<::google::protobuf::Message *>( response_client.message->getPayload()); - Anon::NackReply *response_payload = - dynamic_cast(response_google_msg_ptr); + auto envelope = + dynamic_cast(response_google_msg_ptr); std::cout << client->id() << " Validating message content received from server..." << std::endl; - if (response_payload->err_code() != ErrorCode::ID_SERVICE_ERROR) { + // Changed: BOOST_CHECK -> if check (this isn't a Boost test file) + if (!envelope || !envelope->has_nack_reply()) { + std::cout << client->id() << " FAILED" << std::endl; + EXCEPT_PARAM(1, "TCP Secure test failed - no nack_reply in envelope"); + } + + // Changed: SERVICE_ERROR -> ID_SERVICE_ERROR + if (envelope->nack_reply().err_code() != ErrorCode::SERVICE_ERROR) { std::cout << client->id() << " FAILED" << std::endl; EXCEPT_PARAM(1, "TCP Secure test failed unexpected ErrorCode returned by " "NACK reply from server, client failing."); } - if (response_payload->err_msg().compare(error_msg) != 0) { + + // Changed: response_payload -> envelope->nack_reply() + if (envelope->nack_reply().err_msg().compare(error_msg) != 0) { std::cout << client->id() << " FAILED" << std::endl; EXCEPT_PARAM(1, "TCP Secure test failed unexpected error message " "returned from server provided: " - << response_payload->err_msg() + << envelope->nack_reply().err_msg() << " Expected: " << error_msg); } } - std::cout << client->id() << " SUCCESS" << std::endl; return 0; } diff --git a/common/tests/security/tcp_secure/test_tcp_secure_server.cpp b/common/tests/security/tcp_secure/test_tcp_secure_server.cpp index e5a52e8a7..566200ab0 
100644 --- a/common/tests/security/tcp_secure/test_tcp_secure_server.cpp +++ b/common/tests/security/tcp_secure/test_tcp_secure_server.cpp @@ -9,9 +9,7 @@ #include "common/TraceException.hpp" // Proto file includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" // Third party includes #include @@ -180,16 +178,23 @@ int main(int a_argc, char **a_argv) { } std::cout << std::endl; + // Changed: cast to Envelope, access inner message auto google_msg_ptr = std::get<::google::protobuf::Message *>(response.message->getPayload()); - Anon::AuthenticateByTokenRequest *payload = - dynamic_cast(google_msg_ptr); + auto envelope = + dynamic_cast(google_msg_ptr); - if (payload->token().compare(token) != 0) { + if (!envelope || !envelope->has_authenticate_by_token_request()) { + std::cout << server->id() << " FAILED" << std::endl; + EXCEPT_PARAM(1, "Error detected in server, expected authenticate_by_token_request not found in envelope"); + } + + if (envelope->authenticate_by_token_request().token().compare(token) != 0) { std::cout << server->id() << " FAILED" << std::endl; EXCEPT_PARAM(1, "Error detected in server, expected message content is " "incorrect. 
Actual token value is " - << payload->token() << " Expected token value is " + << envelope->authenticate_by_token_request().token() + << " Expected token value is " << token); } @@ -197,13 +202,14 @@ int main(int a_argc, char **a_argv) { // Server send a reply auto nack_msg = msg_factory.createResponseEnvelope(*response.message); - // Create Google proto message - auto nack_reply = std::make_unique(); - nack_reply->set_err_code(ErrorCode::ID_SERVICE_ERROR); + // Changed: wrap in envelope + auto nack_envelope = std::make_unique(); + auto* nack_reply = nack_envelope->mutable_nack_reply(); + nack_reply->set_err_code(ErrorCode::SERVICE_ERROR); nack_reply->set_err_msg(error_msg); // Place google proto message in IMessage - nack_msg->setPayload(std::move(nack_reply)); + nack_msg->setPayload(std::move(nack_envelope)); server->send(*nack_msg); } diff --git a/common/tests/unit/CMakeLists.txt b/common/tests/unit/CMakeLists.txt index 60f169546..78a65fba4 100644 --- a/common/tests/unit/CMakeLists.txt +++ b/common/tests/unit/CMakeLists.txt @@ -7,7 +7,6 @@ foreach(PROG test_Value test_MessageFactory test_OperatorFactory - test_ProtoBufFactory test_ProtoBufMap test_Proxy test_ProxyBasicZMQ diff --git a/common/tests/unit/test_Buffer.cpp b/common/tests/unit/test_Buffer.cpp index e258b2b30..cd22284ba 100644 --- a/common/tests/unit/test_Buffer.cpp +++ b/common/tests/unit/test_Buffer.cpp @@ -113,7 +113,7 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf_repo_create_request) { ProtoBufMap proto_map; ProtoBufFactory proto_factory; - SDMS::Auth::RepoCreateRequest repo_create_req; + SDMS::RepoCreateRequest repo_create_req; const std::string id = "bonanza"; const std::string title = "All you can eat."; @@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf_repo_create_request) { copyFromBuffer(new_msg.get(), buffer); auto new_repo_create_req = - dynamic_cast(new_msg.get()); + dynamic_cast(new_msg.get()); BOOST_CHECK(new_repo_create_req->id().compare(id) == 0); 
BOOST_CHECK(new_repo_create_req->title().compare(title) == 0); @@ -177,7 +177,7 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf) { ProtoBufMap proto_map; ProtoBufFactory proto_factory; - SDMS::Anon::AuthenticateByPasswordRequest auth_by_pass_req; + SDMS::AuthenticateByPasswordRequest auth_by_pass_req; const std::string uid = "tonystark"; const std::string password = "skeleton_key"; @@ -203,7 +203,7 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf) { copyFromBuffer(new_msg.get(), buffer); auto new_auth_by_pass_req = - dynamic_cast(new_msg.get()); + dynamic_cast(new_msg.get()); BOOST_CHECK(new_auth_by_pass_req->password().compare(password) == 0); BOOST_CHECK(new_auth_by_pass_req->uid().compare(uid) == 0); @@ -214,7 +214,7 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf_empty_payload) { ProtoBufMap proto_map; ProtoBufFactory proto_factory; - SDMS::Anon::AckReply ack_reply; + SDMS::AckReply ack_reply; Buffer buffer; std::cout << "Calling Copy to buffer" << std::endl; @@ -232,6 +232,6 @@ BOOST_AUTO_TEST_CASE(testing_Buffer_googleprotobuf_empty_payload) { copyFromBuffer(new_msg.get(), buffer); auto new_auth_by_pass_req = - dynamic_cast(new_msg.get()); + dynamic_cast(new_msg.get()); } BOOST_AUTO_TEST_SUITE_END() diff --git a/common/tests/unit/test_CommunicatorFactory.cpp b/common/tests/unit/test_CommunicatorFactory.cpp index a5c6f3a77..3ebf1dd56 100644 --- a/common/tests/unit/test_CommunicatorFactory.cpp +++ b/common/tests/unit/test_CommunicatorFactory.cpp @@ -12,8 +12,7 @@ #include "common/ProtocolTypes.hpp" // Proto file includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -139,7 +138,7 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactory) { msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); @@ -178,8 +177,8 
@@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactory) { auto google_msg_ptr = std::get<::google::protobuf::Message *>(response.message->getPayload()); - Anon::AuthenticateByTokenRequest *payload = - dynamic_cast(google_msg_ptr); + SDMS::AuthenticateByTokenRequest *payload = + dynamic_cast(google_msg_ptr); BOOST_CHECK(payload->token().compare(token) == 0); } @@ -269,7 +268,7 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactorySecure) { msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); @@ -308,8 +307,8 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactorySecure) { auto google_msg_ptr = std::get<::google::protobuf::Message *>(response.message->getPayload()); - Anon::AuthenticateByTokenRequest *payload = - dynamic_cast(google_msg_ptr); + SDMS::AuthenticateByTokenRequest *payload = + dynamic_cast(google_msg_ptr); BOOST_CHECK(payload->token().compare(token) == 0); } @@ -320,7 +319,7 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactorySecure) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); - auto ack_reply = std::make_unique(); + auto ack_reply = std::make_unique(); msg_from_client->setPayload(std::move(ack_reply)); @@ -358,7 +357,7 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactorySecure) { auto google_msg_ptr = std::get<::google::protobuf::Message *>(response.message->getPayload()); - dynamic_cast(google_msg_ptr); + dynamic_cast(google_msg_ptr); } } @@ -454,7 +453,7 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactoryReply) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); - auto auth_by_token_req = std::make_unique(); + auto auth_by_token_req = std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); @@ -494,8 +493,8 @@ 
BOOST_AUTO_TEST_CASE(testing_CommunicatorFactoryReply) { auto google_msg_ptr = std::get<::google::protobuf::Message *>(response.message->getPayload()); - Anon::AuthenticateByTokenRequest *payload = - dynamic_cast(google_msg_ptr); + SDMS::AuthenticateByTokenRequest *payload = + dynamic_cast(google_msg_ptr); BOOST_CHECK(payload->token().compare(token) == 0); // Server receive @@ -504,8 +503,8 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactoryReply) { auto nack_msg = msg_factory.createResponseEnvelope(*response.message); // Create Google proto message - auto nack_reply = std::make_unique(); - nack_reply->set_err_code(ErrorCode::ID_SERVICE_ERROR); + auto nack_reply = std::make_unique(); + nack_reply->set_err_code(ErrorCode::SERVICE_ERROR); std::string error_msg = "testing_no_error"; nack_reply->set_err_msg(error_msg); @@ -530,10 +529,10 @@ BOOST_AUTO_TEST_CASE(testing_CommunicatorFactoryReply) { auto response_google_msg_ptr = std::get<::google::protobuf::Message *>( response_client.message->getPayload()); - Anon::NackReply *response_payload = - dynamic_cast(response_google_msg_ptr); + SDMS::NackReply *response_payload = + dynamic_cast(response_google_msg_ptr); - BOOST_CHECK(response_payload->err_code() == ErrorCode::ID_SERVICE_ERROR); + BOOST_CHECK(response_payload->err_code() == ErrorCode::SERVICE_ERROR); BOOST_CHECK(response_payload->err_msg().compare(error_msg) == 0); // Client receive diff --git a/common/tests/unit/test_Frame.cpp b/common/tests/unit/test_Frame.cpp index 2ee80b09e..40493c41f 100644 --- a/common/tests/unit/test_Frame.cpp +++ b/common/tests/unit/test_Frame.cpp @@ -12,7 +12,7 @@ #include "common/ProtoBufMap.hpp" // Proto file includes -#include "common/SDMS_Anon.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -36,25 +36,21 @@ BOOST_AUTO_TEST_CASE(testing_Frame) { Frame frame; BOOST_CHECK(frame.size == 0); - BOOST_CHECK(frame.proto_id == 0); - BOOST_CHECK(frame.msg_id == 0); + BOOST_CHECK(frame.msg_type == 0); 
BOOST_CHECK(frame.context == 0); frame.size = 4; - frame.proto_id = 3; - frame.msg_id = 1; + frame.msg_type = 1; frame.context = 2; BOOST_CHECK(frame.size == 4); - BOOST_CHECK(frame.proto_id == 3); - BOOST_CHECK(frame.msg_id == 1); + BOOST_CHECK(frame.msg_type == 1); BOOST_CHECK(frame.context == 2); frame.clear(); BOOST_CHECK(frame.size == 0); - BOOST_CHECK(frame.proto_id == 0); - BOOST_CHECK(frame.msg_id == 0); + BOOST_CHECK(frame.msg_type == 0); BOOST_CHECK(frame.context == 0); } @@ -62,8 +58,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameConverter) { Frame frame; frame.size = 4; - frame.proto_id = 3; - frame.msg_id = 1; + frame.msg_type = 1; frame.context = 2; FrameConverter converter; @@ -76,8 +71,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameConverter) { converter.copy(FrameConverter::CopyDirection::TO_FRAME, zmq_msg, frame_new); BOOST_CHECK(frame_new.size == 4); - BOOST_CHECK(frame_new.proto_id == 3); - BOOST_CHECK(frame_new.msg_id == 1); + BOOST_CHECK(frame_new.msg_type == 1); BOOST_CHECK(frame_new.context == 2); } @@ -87,7 +81,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameFactory) { ProtoBufMap proto_map; - SDMS::Anon::AuthenticateByPasswordRequest auth_by_pass_req; + SDMS::AuthenticateByPasswordRequest auth_by_pass_req; const std::string uid = "tonystark"; const std::string password = "skeleton_key"; auth_by_pass_req.set_uid(uid); @@ -99,7 +93,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameFactory) { Frame frame = factory.create(auth_by_pass_req, proto_map); BOOST_CHECK(frame.size == expected_size); - BOOST_CHECK(frame.getMsgType() == expected_msg_type); + BOOST_CHECK(frame.msg_type == expected_msg_type); } BOOST_AUTO_TEST_CASE(testing_FrameFactory_EmptyPayload) { @@ -111,9 +105,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameFactory_EmptyPayload) { Frame frame = factory.create(*msg); BOOST_CHECK(frame.size == 0); - BOOST_CHECK(frame.proto_id == 0); - BOOST_CHECK(frame.msg_id == 0); - BOOST_CHECK(frame.getMsgType() == 0); + BOOST_CHECK(frame.msg_type == 0); auto msg_new = 
msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); FrameConverter converter; @@ -122,9 +114,7 @@ BOOST_AUTO_TEST_CASE(testing_FrameFactory_EmptyPayload) { frame_new); BOOST_CHECK(frame_new.size == 0); - BOOST_CHECK(frame_new.proto_id == 0); - BOOST_CHECK(frame_new.msg_id == 0); - BOOST_CHECK(frame_new.getMsgType() == 0); + BOOST_CHECK(frame_new.msg_type == 0); } BOOST_AUTO_TEST_CASE(testing_FrameFactory2) { @@ -134,30 +124,21 @@ BOOST_AUTO_TEST_CASE(testing_FrameFactory2) { ProtoBufMap proto_map; auto msg = msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); - auto auth_by_token_req = std::make_unique(); + auto auth_by_token_req = std::make_unique(); auth_by_token_req->set_token("magic_token"); + // Reference frame should reflect envelope (wire) size + auto temp_envelope = proto_map.wrapInEnvelope(*auth_by_token_req); Frame frame_from_protocol_msg = frame_factory.create(*auth_by_token_req, proto_map); + frame_from_protocol_msg.size = + static_cast(temp_envelope->ByteSizeLong()); msg->setPayload(std::move(auth_by_token_req)); Frame frame_IMessage = frame_factory.create(*msg); - std::cout << "frame_generated_from_IMessage" << std::endl; - std::cout << frame_IMessage.size << std::endl; - std::cout << frame_IMessage.proto_id << std::endl; - std::cout << frame_IMessage.msg_id << std::endl; - std::cout << frame_IMessage.context << std::endl; - std::cout << "frame_generated_from_protocol_msg" << std::endl; - std::cout << frame_from_protocol_msg.size << std::endl; - std::cout << frame_from_protocol_msg.proto_id << std::endl; - std::cout << frame_from_protocol_msg.msg_id << std::endl; - std::cout << frame_from_protocol_msg.context << std::endl; - BOOST_CHECK(frame_IMessage.size == frame_from_protocol_msg.size); - BOOST_CHECK(frame_IMessage.proto_id == frame_from_protocol_msg.proto_id); - BOOST_CHECK(frame_IMessage.msg_id == frame_from_protocol_msg.msg_id); BOOST_CHECK(frame_IMessage.context == frame_from_protocol_msg.context); } diff --git 
a/common/tests/unit/test_MessageFactory.cpp b/common/tests/unit/test_MessageFactory.cpp index 3df758e7e..65130756b 100644 --- a/common/tests/unit/test_MessageFactory.cpp +++ b/common/tests/unit/test_MessageFactory.cpp @@ -9,7 +9,7 @@ #include "common/ProtoBufMap.hpp" // Proto file includes -#include "common/SDMS_Anon.pb.h" +#include "common/envelope.pb.h" using namespace SDMS; @@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(testing_MessageFactory) { message->set(MessageAttribute::KEY, key); message->set(MessageAttribute::STATE, MessageState::REQUEST); message->set(constants::message::google::CONTEXT, context); - auto auth_by_token_req = std::make_unique(); + auto auth_by_token_req = std::make_unique(); std::string token = "golden_chest"; auth_by_token_req->set_token(token); diff --git a/common/tests/unit/test_ProtoBufFactory.cpp b/common/tests/unit/test_ProtoBufFactory.cpp deleted file mode 100644 index a3d0c4910..000000000 --- a/common/tests/unit/test_ProtoBufFactory.cpp +++ /dev/null @@ -1,59 +0,0 @@ -#define BOOST_TEST_MAIN - -#define BOOST_TEST_MODULE protobuffactory -#include -#include - -// Local private includes -#include "ProtoBufFactory.hpp" - -// Local public includes -#include "common/ProtoBufMap.hpp" - -// Proto file includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" - -// Standard includes -#include - -using namespace SDMS; - -struct GlobalProtobufTeardown { - ~GlobalProtobufTeardown() { - // This is the teardown function that runs once at the end - google::protobuf::ShutdownProtobufLibrary(); - } -}; - -// Declare a global fixture instance -BOOST_GLOBAL_FIXTURE(GlobalProtobufTeardown); - -BOOST_AUTO_TEST_SUITE(ProtoBufFactoryTest) - -BOOST_AUTO_TEST_CASE(testing_ProtoBufFactory) { - - ProtoBufMap proto_map; - ProtoBufFactory proto_factory; - - SDMS::Anon::VersionRequest version_request; - uint16_t msg_type = proto_map.getMessageType(version_request); - auto msg = proto_factory.create(msg_type); - BOOST_CHECK(msg_type == 
proto_map.getMessageType(*msg)); -} - -BOOST_AUTO_TEST_CASE(testing_ProtoBufFactory2) { - - ProtoBufMap proto_map; - ProtoBufFactory proto_factory; - - SDMS::Anon::NackReply nack_reply; - uint16_t msg_type = proto_map.getMessageType(nack_reply); - auto msg = proto_factory.create(msg_type); - BOOST_CHECK(msg_type == proto_map.getMessageType(*msg)); - - auto nack_reply_new = dynamic_cast(*msg); - - nack_reply_new.set_err_msg("This is working"); -} -BOOST_AUTO_TEST_SUITE_END() diff --git a/common/tests/unit/test_ProtoBufMap.cpp b/common/tests/unit/test_ProtoBufMap.cpp index 21c12c2e0..d12f83edf 100644 --- a/common/tests/unit/test_ProtoBufMap.cpp +++ b/common/tests/unit/test_ProtoBufMap.cpp @@ -27,21 +27,11 @@ BOOST_GLOBAL_FIXTURE(GlobalProtobufTeardown); BOOST_AUTO_TEST_SUITE(ProtoBufFactoryTest) -BOOST_AUTO_TEST_CASE(testing_ProtoBufFactory_ProtocolID) { - ProtoBufMap proto_map; - - uint8_t proto_id = - proto_map.getProtocolID(MessageProtocol::GOOGLE_ANONONYMOUS); - BOOST_CHECK(proto_id == 1); - proto_id = proto_map.getProtocolID(MessageProtocol::GOOGLE_AUTHORIZED); - BOOST_CHECK(proto_id == 2); -} - BOOST_AUTO_TEST_CASE(testing_ProtoBufFactory) { ProtoBufMap proto_map; ProtoBufFactory proto_factory; - SDMS::Anon::VersionRequest version_request; + SDMS::VersionRequest version_request; uint16_t msg_type = proto_map.getMessageType(version_request); auto msg = proto_factory.create(msg_type); std::cout << "VersionRequest msg_type of VersionRequest, " << msg_type @@ -50,7 +40,7 @@ BOOST_AUTO_TEST_CASE(testing_ProtoBufFactory) { BOOST_AUTO_TEST_CASE(testing_ProtoBufMap_toString) { ProtoBufMap proto_map; - SDMS::Anon::VersionRequest version_request; + SDMS::VersionRequest version_request; uint16_t msg_type = proto_map.getMessageType(version_request); auto name = proto_map.toString(msg_type); BOOST_CHECK(name.compare("VersionRequest") == 0); diff --git a/common/tests/unit/test_Proxy.cpp b/common/tests/unit/test_Proxy.cpp index 5156756d6..b4ab4c4fe 100644 --- 
a/common/tests/unit/test_Proxy.cpp +++ b/common/tests/unit/test_Proxy.cpp @@ -19,8 +19,7 @@ #include "common/SocketOptions.hpp" // Proto file includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -229,7 +228,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy) { std::move(incoming_operators), log_context_proxy); std::chrono::duration duration = - std::chrono::milliseconds(100); + std::chrono::milliseconds(1000); proxy.setRunDuration(duration); proxy.run(); @@ -251,7 +250,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy) { msg_from_client->set(MessageAttribute::KEY, key); msg_from_client->set(constants::message::google::CONTEXT, context); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); client->send(*msg_from_client); @@ -449,7 +448,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy2) { log_context_proxy_middle); std::chrono::duration duration = - std::chrono::milliseconds(30); + std::chrono::milliseconds(400); proxy.setRunDuration(duration); proxy.run(); @@ -468,7 +467,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy2) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); client->send(*msg_from_client); @@ -506,7 +505,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy2) { auto google_msg = std::get<::google::protobuf::Message *>(response.message->getPayload()); auto new_auth_by_pass_req = - dynamic_cast(google_msg); + dynamic_cast(google_msg); BOOST_CHECK(new_auth_by_pass_req->token().compare(token) == 0); @@ -766,7 +765,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyChain) { msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); msg_from_client->set(MessageAttribute::ID, id); 
msg_from_client->set(MessageAttribute::KEY, key); - auto auth_by_token_req = std::make_unique(); + auto auth_by_token_req = std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); client->send(*msg_from_client); @@ -807,7 +806,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyChain) { auto google_msg = std::get<::google::protobuf::Message *>(response.message->getPayload()); auto new_auth_by_pass_req = - dynamic_cast(google_msg); + dynamic_cast(google_msg); BOOST_CHECK(new_auth_by_pass_req->token().compare(token) == 0); @@ -816,8 +815,8 @@ BOOST_AUTO_TEST_CASE(testing_ProxyChain) { // the proxy chain auto return_msg = msg_factory.createResponseEnvelope(*response.message); // We will just pass a nack reply because it is easy - auto nack_reply = std::make_unique(); - nack_reply->set_err_code(ErrorCode::ID_SERVICE_ERROR); + auto nack_reply = std::make_unique(); + nack_reply->set_err_code(ErrorCode::SERVICE_ERROR); nack_reply->set_err_msg(error_msg); // Place google proto message in IMessage @@ -834,10 +833,10 @@ BOOST_AUTO_TEST_CASE(testing_ProxyChain) { auto response_google_msg_ptr = std::get<::google::protobuf::Message *>( msg_from_server.message->getPayload()); - Anon::NackReply *response_payload = - dynamic_cast(response_google_msg_ptr); + SDMS::NackReply *response_payload = + dynamic_cast(response_google_msg_ptr); - BOOST_CHECK(response_payload->err_code() == ErrorCode::ID_SERVICE_ERROR); + BOOST_CHECK(response_payload->err_code() == ErrorCode::SERVICE_ERROR); BOOST_CHECK(response_payload->err_msg().compare(error_msg) == 0); proxy_thread->join(); @@ -1015,7 +1014,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy_with_PERSISTENT_proxy_client) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); 
client->send(*msg_from_client); @@ -1053,7 +1052,7 @@ BOOST_AUTO_TEST_CASE(testing_Proxy_with_PERSISTENT_proxy_client) { auto google_msg = std::get<::google::protobuf::Message *>(response.message->getPayload()); auto new_auth_by_pass_req = - dynamic_cast(google_msg); + dynamic_cast(google_msg); BOOST_CHECK(new_auth_by_pass_req->token().compare(token) == 0); diff --git a/common/tests/unit/test_ProxyBasicZMQ.cpp b/common/tests/unit/test_ProxyBasicZMQ.cpp index 03b2f3328..a5e319ddd 100644 --- a/common/tests/unit/test_ProxyBasicZMQ.cpp +++ b/common/tests/unit/test_ProxyBasicZMQ.cpp @@ -20,8 +20,7 @@ #include "common/SocketOptions.hpp" // Proto file includes -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -219,7 +218,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ) { log_context1); std::chrono::duration duration = - std::chrono::milliseconds(400); + std::chrono::milliseconds(1000); proxy.setRunDuration(duration); proxy.run(); @@ -238,19 +237,31 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - client->send(*msg_from_client); + std::this_thread::sleep_for(std::chrono::milliseconds(250)); + //client->send(*msg_from_client); + // Client send with retry + auto end_time = std::chrono::steady_clock::now() + std::chrono::milliseconds(2000); + bool sent = false; + while (!sent && std::chrono::steady_clock::now() < end_time) { + try { + client->send(*msg_from_client); + sent = true; + } catch (...) 
{ + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + BOOST_REQUIRE(sent); // Client send { // Server receive ICommunicator::Response response = server->receive(MessageType::GOOGLE_PROTOCOL_BUFFER); - std::chrono::duration duration = std::chrono::milliseconds(50); + std::chrono::duration duration = std::chrono::milliseconds(400); auto end_time = std::chrono::steady_clock::now() + duration; while (response.time_out and end_time > std::chrono::steady_clock::now()) { @@ -291,7 +302,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ) { auto google_msg = std::get<::google::protobuf::Message *>( response.message->getPayload()); auto new_auth_by_pass_req = - dynamic_cast(google_msg); + dynamic_cast(google_msg); BOOST_CHECK(new_auth_by_pass_req->token().compare(token) == 0); } // Server receive @@ -476,7 +487,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_Reply) { msg_from_client->set(MessageAttribute::ID, id); msg_from_client->set(MessageAttribute::KEY, key); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); @@ -506,8 +517,8 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_Reply) { // the proxy chain auto return_msg = msg_factory.createResponseEnvelope(*response.message); // We will just pass a nack reply because it is easy - auto nack_reply = std::make_unique(); - nack_reply->set_err_code(ErrorCode::ID_SERVICE_ERROR); + auto nack_reply = std::make_unique(); + nack_reply->set_err_code(ErrorCode::SERVICE_ERROR); nack_reply->set_err_msg(error_msg); // Place google proto message in IMessage @@ -528,11 +539,11 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_Reply) { std::cout << __FILE__ << ":" << __LINE__ << std::endl; auto response_google_msg_ptr = std::get<::google::protobuf::Message *>( msg_from_server.message->getPayload()); - Anon::NackReply *response_payload = - dynamic_cast(response_google_msg_ptr); + SDMS::NackReply *response_payload = + 
dynamic_cast(response_google_msg_ptr); std::cout << __FILE__ << ":" << __LINE__ << std::endl; - BOOST_CHECK(response_payload->err_code() == ErrorCode::ID_SERVICE_ERROR); + BOOST_CHECK(response_payload->err_code() == ErrorCode::SERVICE_ERROR); BOOST_CHECK(response_payload->err_msg().compare(error_msg) == 0); std::cout << __FILE__ << ":" << __LINE__ << std::endl; @@ -742,7 +753,7 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_TCPServer_Reply) { msg_from_client->set(MessageAttribute::KEY, key); msg_from_client->set(constants::message::google::CONTEXT, context); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); auth_by_token_req->set_token(token); msg_from_client->setPayload(std::move(auth_by_token_req)); @@ -785,8 +796,8 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_TCPServer_Reply) { // the proxy chain auto return_msg = msg_factory.createResponseEnvelope(*response.message); // We will just pass a nack reply because it is easy - auto nack_reply = std::make_unique(); - nack_reply->set_err_code(ErrorCode::ID_SERVICE_ERROR); + auto nack_reply = std::make_unique(); + nack_reply->set_err_code(ErrorCode::SERVICE_ERROR); nack_reply->set_err_msg(error_msg); // Place google proto message in IMessage @@ -812,11 +823,11 @@ BOOST_AUTO_TEST_CASE(testing_ProxyBasicZMQ_TCPServer_Reply) { std::cout << __FILE__ << ":" << __LINE__ << std::endl; auto response_google_msg_ptr = std::get<::google::protobuf::Message *>( msg_from_server.message->getPayload()); - Anon::NackReply *response_payload = - dynamic_cast(response_google_msg_ptr); + SDMS::NackReply *response_payload = + dynamic_cast(response_google_msg_ptr); std::cout << __FILE__ << ":" << __LINE__ << std::endl; - BOOST_CHECK(response_payload->err_code() == ErrorCode::ID_SERVICE_ERROR); + BOOST_CHECK(response_payload->err_code() == ErrorCode::SERVICE_ERROR); BOOST_CHECK(response_payload->err_msg().compare(error_msg) == 0); std::cout << __FILE__ << ":" << __LINE__ << std::endl; diff --git 
a/core/server/ClientWorker.cpp b/core/server/ClientWorker.cpp index be85bae71..5691f73df 100644 --- a/core/server/ClientWorker.cpp +++ b/core/server/ClientWorker.cpp @@ -1,4 +1,3 @@ - // Local DataFed includes #include "ClientWorker.hpp" #include "TaskMgr.hpp" @@ -14,10 +13,7 @@ #include "common/libjson.hpp" // Proto files -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" -#include "common/Version.pb.h" +#include "common/envelope.pb.h" // Third party includes #include @@ -30,9 +26,6 @@ using namespace std; namespace SDMS { -using namespace SDMS::Anon; -using namespace SDMS::Auth; - namespace Core { map ClientWorker::m_msg_handlers; @@ -81,20 +74,19 @@ void ClientWorker::wait() { } } -#define SET_MSG_HANDLER(proto_id, msg, func) \ - m_msg_handlers[m_msg_mapper->getMessageType(proto_id, #msg)] = func -#define SET_MSG_HANDLER_DB(proto_id, rq, rp, func) \ - m_msg_handlers[m_msg_mapper->getMessageType(proto_id, #rq)] = \ +#define SET_MSG_HANDLER(msg, func) \ + m_msg_handlers[m_msg_mapper->getMessageType(#msg)] = func +#define SET_MSG_HANDLER_DB(rq, rp, func) \ + m_msg_handlers[m_msg_mapper->getMessageType(#rq)] = \ &ClientWorker::dbPassThrough /** * This method configures message handling by creating a map from message type - * to handler function. There are currently two protocol levels: anonymous and - * authenticated. Each is supported by a Google protobuf interface (in - * /common/proto). Most requests can be handled directly by the DB (via - * DatabaseAPI class), but some require local processing. This method maps the - * two classes of requests using the macros SET_MSG_HANDLER (for local) and - * SET_MSG_HANDLER_DB (for DB only). + * (envelope field number) to handler function. Message types are identified + * by their field number in the Envelope proto message. Most requests can be + * handled directly by the DB (via DatabaseAPI class), but some require local + * processing. 
This method maps the two classes of requests using the macros + * SET_MSG_HANDLER (for local) and SET_MSG_HANDLER_DB (for DB only). */ void ClientWorker::setupMsgHandlers() { static std::atomic_flag lock = ATOMIC_FLAG_INIT; @@ -105,192 +97,158 @@ void ClientWorker::setupMsgHandlers() { return; try { - // Register and setup handlers for the Anonymous interface - - uint8_t proto_id = m_msg_mapper->getProtocolID( - MessageProtocol::GOOGLE_ANONONYMOUS); // REG_PROTO( SDMS::Anon ); - // Requests that require the server to take action - SET_MSG_HANDLER(proto_id, VersionRequest, - &ClientWorker::procVersionRequest); - SET_MSG_HANDLER(proto_id, AuthenticateByPasswordRequest, + // Anonymous interface handlers + SET_MSG_HANDLER(VersionRequest, &ClientWorker::procVersionRequest); + SET_MSG_HANDLER(AuthenticateByPasswordRequest, &ClientWorker::procAuthenticateByPasswordRequest); - SET_MSG_HANDLER(proto_id, AuthenticateByTokenRequest, + SET_MSG_HANDLER(AuthenticateByTokenRequest, &ClientWorker::procAuthenticateByTokenRequest); - SET_MSG_HANDLER(proto_id, GetAuthStatusRequest, + SET_MSG_HANDLER(GetAuthStatusRequest, &ClientWorker::procGetAuthStatusRequest); + SET_MSG_HANDLER_DB(DailyMessageRequest, DailyMessageReply, dailyMessage); - // Requests that can be handled by DB client directly - SET_MSG_HANDLER_DB(proto_id, DailyMessageRequest, DailyMessageReply, - dailyMessage); - - // Register and setup handlers for the Authenticated interface - proto_id = m_msg_mapper->getProtocolID(MessageProtocol::GOOGLE_AUTHORIZED); - - // Requests that require the server to take action - SET_MSG_HANDLER(proto_id, GenerateCredentialsRequest, + // Authenticated interface handlers + SET_MSG_HANDLER(GenerateCredentialsRequest, &ClientWorker::procGenerateCredentialsRequest); - SET_MSG_HANDLER(proto_id, RevokeCredentialsRequest, + SET_MSG_HANDLER(RevokeCredentialsRequest, &ClientWorker::procRevokeCredentialsRequest); - SET_MSG_HANDLER(proto_id, DataGetRequest, - &ClientWorker::procDataGetRequest); - 
SET_MSG_HANDLER(proto_id, DataPutRequest, - &ClientWorker::procDataPutRequest); - SET_MSG_HANDLER(proto_id, RecordCreateRequest, + SET_MSG_HANDLER(DataGetRequest, &ClientWorker::procDataGetRequest); + SET_MSG_HANDLER(DataPutRequest, &ClientWorker::procDataPutRequest); + SET_MSG_HANDLER(RecordCreateRequest, &ClientWorker::procRecordCreateRequest); - SET_MSG_HANDLER(proto_id, RecordUpdateRequest, + SET_MSG_HANDLER(RecordUpdateRequest, &ClientWorker::procRecordUpdateRequest); - SET_MSG_HANDLER(proto_id, RecordUpdateBatchRequest, + SET_MSG_HANDLER(RecordUpdateBatchRequest, &ClientWorker::procRecordUpdateBatchRequest); - SET_MSG_HANDLER(proto_id, RecordDeleteRequest, + SET_MSG_HANDLER(RecordDeleteRequest, &ClientWorker::procRecordDeleteRequest); - SET_MSG_HANDLER(proto_id, RecordAllocChangeRequest, + SET_MSG_HANDLER(RecordAllocChangeRequest, &ClientWorker::procRecordAllocChangeRequest); - SET_MSG_HANDLER(proto_id, RecordOwnerChangeRequest, + SET_MSG_HANDLER(RecordOwnerChangeRequest, &ClientWorker::procRecordOwnerChangeRequest); - SET_MSG_HANDLER(proto_id, ProjectSearchRequest, + SET_MSG_HANDLER(ProjectSearchRequest, &ClientWorker::procProjectSearchRequest); - SET_MSG_HANDLER(proto_id, CollDeleteRequest, + SET_MSG_HANDLER(CollDeleteRequest, &ClientWorker::procCollectionDeleteRequest); - SET_MSG_HANDLER(proto_id, ProjectDeleteRequest, + SET_MSG_HANDLER(ProjectDeleteRequest, &ClientWorker::procProjectDeleteRequest); - SET_MSG_HANDLER(proto_id, RepoAuthzRequest, - &ClientWorker::procRepoAuthzRequest); - SET_MSG_HANDLER(proto_id, RepoAllocationCreateRequest, + SET_MSG_HANDLER(RepoAuthzRequest, &ClientWorker::procRepoAuthzRequest); + SET_MSG_HANDLER(RepoAllocationCreateRequest, &ClientWorker::procRepoAllocationCreateRequest); - SET_MSG_HANDLER(proto_id, RepoAllocationDeleteRequest, + SET_MSG_HANDLER(RepoAllocationDeleteRequest, &ClientWorker::procRepoAllocationDeleteRequest); - SET_MSG_HANDLER(proto_id, UserGetAccessTokenRequest, + SET_MSG_HANDLER(UserGetAccessTokenRequest, 
&ClientWorker::procUserGetAccessTokenRequest); - SET_MSG_HANDLER(proto_id, SchemaCreateRequest, + SET_MSG_HANDLER(SchemaCreateRequest, &ClientWorker::procSchemaCreateRequest); - SET_MSG_HANDLER(proto_id, SchemaReviseRequest, + SET_MSG_HANDLER(SchemaReviseRequest, &ClientWorker::procSchemaReviseRequest); - SET_MSG_HANDLER(proto_id, SchemaUpdateRequest, + SET_MSG_HANDLER(SchemaUpdateRequest, &ClientWorker::procSchemaUpdateRequest); - SET_MSG_HANDLER(proto_id, MetadataValidateRequest, + SET_MSG_HANDLER(MetadataValidateRequest, &ClientWorker::procMetadataValidateRequest); // Requires updating repo cache - SET_MSG_HANDLER(proto_id, RepoCreateRequest, &ClientWorker::procRepoCreate); - SET_MSG_HANDLER(proto_id, RepoUpdateRequest, &ClientWorker::procRepoUpdate); - SET_MSG_HANDLER(proto_id, RepoDeleteRequest, &ClientWorker::procRepoDelete); + SET_MSG_HANDLER(RepoCreateRequest, &ClientWorker::procRepoCreate); + SET_MSG_HANDLER(RepoUpdateRequest, &ClientWorker::procRepoUpdate); + SET_MSG_HANDLER(RepoDeleteRequest, &ClientWorker::procRepoDelete); // Requests that can be handled by DB client directly - SET_MSG_HANDLER_DB(proto_id, CheckPermsRequest, CheckPermsReply, - checkPerms); - SET_MSG_HANDLER_DB(proto_id, GetPermsRequest, GetPermsReply, getPerms); - SET_MSG_HANDLER_DB(proto_id, UserViewRequest, UserDataReply, userView); - SET_MSG_HANDLER_DB(proto_id, UserSetAccessTokenRequest, AckReply, + SET_MSG_HANDLER_DB(CheckPermsRequest, CheckPermsReply, checkPerms); + SET_MSG_HANDLER_DB(GetPermsRequest, GetPermsReply, getPerms); + SET_MSG_HANDLER_DB(UserViewRequest, UserDataReply, userView); + SET_MSG_HANDLER_DB(UserSetAccessTokenRequest, AckReply, userSetAccessToken); - SET_MSG_HANDLER_DB(proto_id, UserCreateRequest, UserDataReply, userCreate); - SET_MSG_HANDLER_DB(proto_id, UserUpdateRequest, UserDataReply, userUpdate); - SET_MSG_HANDLER_DB(proto_id, UserListAllRequest, UserDataReply, - userListAll); - SET_MSG_HANDLER_DB(proto_id, UserListCollabRequest, UserDataReply, - 
userListCollab); - SET_MSG_HANDLER_DB(proto_id, UserFindByUUIDsRequest, UserDataReply, + SET_MSG_HANDLER_DB(UserCreateRequest, UserDataReply, userCreate); + SET_MSG_HANDLER_DB(UserUpdateRequest, UserDataReply, userUpdate); + SET_MSG_HANDLER_DB(UserListAllRequest, UserDataReply, userListAll); + SET_MSG_HANDLER_DB(UserListCollabRequest, UserDataReply, userListCollab); + SET_MSG_HANDLER_DB(UserFindByUUIDsRequest, UserDataReply, userFindByUUIDs); - SET_MSG_HANDLER_DB(proto_id, UserFindByNameUIDRequest, UserDataReply, + SET_MSG_HANDLER_DB(UserFindByNameUIDRequest, UserDataReply, userFindByNameUID); - SET_MSG_HANDLER_DB(proto_id, UserGetRecentEPRequest, UserGetRecentEPReply, + SET_MSG_HANDLER_DB(UserGetRecentEPRequest, UserGetRecentEPReply, userGetRecentEP); - SET_MSG_HANDLER_DB(proto_id, UserSetRecentEPRequest, AckReply, - userSetRecentEP); - SET_MSG_HANDLER_DB(proto_id, ProjectViewRequest, ProjectDataReply, - projView); - SET_MSG_HANDLER_DB(proto_id, ProjectCreateRequest, ProjectDataReply, - projCreate); - SET_MSG_HANDLER_DB(proto_id, ProjectUpdateRequest, ProjectDataReply, - projUpdate); - SET_MSG_HANDLER_DB(proto_id, ProjectListRequest, ListingReply, projList); - SET_MSG_HANDLER_DB(proto_id, ProjectGetRoleRequest, ProjectGetRoleReply, + SET_MSG_HANDLER_DB(UserSetRecentEPRequest, AckReply, userSetRecentEP); + SET_MSG_HANDLER_DB(ProjectViewRequest, ProjectDataReply, projView); + SET_MSG_HANDLER_DB(ProjectCreateRequest, ProjectDataReply, projCreate); + SET_MSG_HANDLER_DB(ProjectUpdateRequest, ProjectDataReply, projUpdate); + SET_MSG_HANDLER_DB(ProjectListRequest, ListingReply, projList); + SET_MSG_HANDLER_DB(ProjectGetRoleRequest, ProjectGetRoleReply, projGetRole); - SET_MSG_HANDLER_DB(proto_id, RecordViewRequest, RecordDataReply, - recordView); - SET_MSG_HANDLER_DB(proto_id, RecordCreateBatchRequest, RecordDataReply, + SET_MSG_HANDLER_DB(RecordViewRequest, RecordDataReply, recordView); + SET_MSG_HANDLER_DB(RecordCreateBatchRequest, RecordDataReply, recordCreateBatch); 
- SET_MSG_HANDLER_DB(proto_id, RecordExportRequest, RecordExportReply, - recordExport); - SET_MSG_HANDLER_DB(proto_id, RecordLockRequest, ListingReply, recordLock); - SET_MSG_HANDLER_DB(proto_id, RecordListByAllocRequest, ListingReply, + SET_MSG_HANDLER_DB(RecordExportRequest, RecordExportReply, recordExport); + SET_MSG_HANDLER_DB(RecordLockRequest, ListingReply, recordLock); + SET_MSG_HANDLER_DB(RecordListByAllocRequest, ListingReply, recordListByAlloc); - SET_MSG_HANDLER_DB(proto_id, RecordGetDependencyGraphRequest, ListingReply, + SET_MSG_HANDLER_DB(RecordGetDependencyGraphRequest, ListingReply, recordGetDependencyGraph); - SET_MSG_HANDLER_DB(proto_id, SearchRequest, ListingReply, generalSearch); - SET_MSG_HANDLER_DB(proto_id, DataPathRequest, DataPathReply, dataPath); - SET_MSG_HANDLER_DB(proto_id, CollViewRequest, CollDataReply, collView); - SET_MSG_HANDLER_DB(proto_id, CollReadRequest, ListingReply, collRead); - SET_MSG_HANDLER_DB(proto_id, CollListPublishedRequest, ListingReply, + SET_MSG_HANDLER_DB(SearchRequest, ListingReply, generalSearch); + SET_MSG_HANDLER_DB(DataPathRequest, DataPathReply, dataPath); + SET_MSG_HANDLER_DB(CollViewRequest, CollDataReply, collView); + SET_MSG_HANDLER_DB(CollReadRequest, ListingReply, collRead); + SET_MSG_HANDLER_DB(CollListPublishedRequest, ListingReply, collListPublished); - SET_MSG_HANDLER_DB(proto_id, CollCreateRequest, CollDataReply, collCreate); - SET_MSG_HANDLER_DB(proto_id, CollUpdateRequest, CollDataReply, collUpdate); - SET_MSG_HANDLER_DB(proto_id, CollWriteRequest, ListingReply, collWrite); - SET_MSG_HANDLER_DB(proto_id, CollMoveRequest, AckReply, collMove); - SET_MSG_HANDLER_DB(proto_id, CollGetParentsRequest, CollPathReply, - collGetParents); - SET_MSG_HANDLER_DB(proto_id, CollGetOffsetRequest, CollGetOffsetReply, + SET_MSG_HANDLER_DB(CollCreateRequest, CollDataReply, collCreate); + SET_MSG_HANDLER_DB(CollUpdateRequest, CollDataReply, collUpdate); + SET_MSG_HANDLER_DB(CollWriteRequest, ListingReply, 
collWrite); + SET_MSG_HANDLER_DB(CollMoveRequest, AckReply, collMove); + SET_MSG_HANDLER_DB(CollGetParentsRequest, CollPathReply, collGetParents); + SET_MSG_HANDLER_DB(CollGetOffsetRequest, CollGetOffsetReply, collGetOffset); - SET_MSG_HANDLER_DB(proto_id, QueryListRequest, ListingReply, queryList); - SET_MSG_HANDLER_DB(proto_id, QueryViewRequest, QueryDataReply, queryView); - SET_MSG_HANDLER_DB(proto_id, QueryExecRequest, ListingReply, queryExec); - SET_MSG_HANDLER_DB(proto_id, QueryCreateRequest, QueryDataReply, - queryCreate); - SET_MSG_HANDLER_DB(proto_id, QueryUpdateRequest, QueryDataReply, - queryUpdate); - SET_MSG_HANDLER_DB(proto_id, QueryDeleteRequest, AckReply, queryDelete); - SET_MSG_HANDLER_DB(proto_id, NoteViewRequest, NoteDataReply, noteView); - SET_MSG_HANDLER_DB(proto_id, NoteListBySubjectRequest, NoteDataReply, + SET_MSG_HANDLER_DB(QueryListRequest, ListingReply, queryList); + SET_MSG_HANDLER_DB(QueryViewRequest, QueryDataReply, queryView); + SET_MSG_HANDLER_DB(QueryExecRequest, ListingReply, queryExec); + SET_MSG_HANDLER_DB(QueryCreateRequest, QueryDataReply, queryCreate); + SET_MSG_HANDLER_DB(QueryUpdateRequest, QueryDataReply, queryUpdate); + SET_MSG_HANDLER_DB(QueryDeleteRequest, AckReply, queryDelete); + SET_MSG_HANDLER_DB(NoteViewRequest, NoteDataReply, noteView); + SET_MSG_HANDLER_DB(NoteListBySubjectRequest, NoteDataReply, noteListBySubject); - SET_MSG_HANDLER_DB(proto_id, NoteCreateRequest, NoteDataReply, noteCreate); - SET_MSG_HANDLER_DB(proto_id, NoteUpdateRequest, NoteDataReply, noteUpdate); - SET_MSG_HANDLER_DB(proto_id, NoteCommentEditRequest, NoteDataReply, + SET_MSG_HANDLER_DB(NoteCreateRequest, NoteDataReply, noteCreate); + SET_MSG_HANDLER_DB(NoteUpdateRequest, NoteDataReply, noteUpdate); + SET_MSG_HANDLER_DB(NoteCommentEditRequest, NoteDataReply, noteCommentEdit); - SET_MSG_HANDLER_DB(proto_id, TaskListRequest, TaskDataReply, taskList); - SET_MSG_HANDLER_DB(proto_id, TaskViewRequest, TaskDataReply, taskView); - 
SET_MSG_HANDLER_DB(proto_id, ACLViewRequest, ACLDataReply, aclView); - SET_MSG_HANDLER_DB(proto_id, ACLUpdateRequest, ACLDataReply, aclUpdate); - SET_MSG_HANDLER_DB(proto_id, ACLSharedListRequest, ListingReply, - aclSharedList); - SET_MSG_HANDLER_DB(proto_id, ACLSharedListItemsRequest, ListingReply, + SET_MSG_HANDLER_DB(TaskListRequest, TaskDataReply, taskList); + SET_MSG_HANDLER_DB(TaskViewRequest, TaskDataReply, taskView); + SET_MSG_HANDLER_DB(ACLViewRequest, ACLDataReply, aclView); + SET_MSG_HANDLER_DB(ACLUpdateRequest, ACLDataReply, aclUpdate); + SET_MSG_HANDLER_DB(ACLSharedListRequest, ListingReply, aclSharedList); + SET_MSG_HANDLER_DB(ACLSharedListItemsRequest, ListingReply, aclSharedListItems); - SET_MSG_HANDLER_DB(proto_id, GroupCreateRequest, GroupDataReply, - groupCreate); - SET_MSG_HANDLER_DB(proto_id, GroupUpdateRequest, GroupDataReply, - groupUpdate); - SET_MSG_HANDLER_DB(proto_id, GroupDeleteRequest, AckReply, groupDelete); - SET_MSG_HANDLER_DB(proto_id, GroupListRequest, GroupDataReply, groupList); - SET_MSG_HANDLER_DB(proto_id, GroupViewRequest, GroupDataReply, groupView); - SET_MSG_HANDLER_DB(proto_id, RepoListRequest, RepoDataReply, repoList); - SET_MSG_HANDLER_DB(proto_id, RepoViewRequest, RepoDataReply, repoView); - SET_MSG_HANDLER_DB(proto_id, RepoCalcSizeRequest, RepoCalcSizeReply, - repoCalcSize); - SET_MSG_HANDLER_DB(proto_id, RepoListAllocationsRequest, - RepoAllocationsReply, repoListAllocations); - SET_MSG_HANDLER_DB(proto_id, RepoListSubjectAllocationsRequest, + SET_MSG_HANDLER_DB(GroupCreateRequest, GroupDataReply, groupCreate); + SET_MSG_HANDLER_DB(GroupUpdateRequest, GroupDataReply, groupUpdate); + SET_MSG_HANDLER_DB(GroupDeleteRequest, AckReply, groupDelete); + SET_MSG_HANDLER_DB(GroupListRequest, GroupDataReply, groupList); + SET_MSG_HANDLER_DB(GroupViewRequest, GroupDataReply, groupView); + SET_MSG_HANDLER_DB(RepoListRequest, RepoDataReply, repoList); + SET_MSG_HANDLER_DB(RepoViewRequest, RepoDataReply, repoView); + 
SET_MSG_HANDLER_DB(RepoCalcSizeRequest, RepoCalcSizeReply, repoCalcSize); + SET_MSG_HANDLER_DB(RepoListAllocationsRequest, RepoAllocationsReply, + repoListAllocations); + SET_MSG_HANDLER_DB(RepoListSubjectAllocationsRequest, RepoAllocationsReply, repoListSubjectAllocations); - SET_MSG_HANDLER_DB(proto_id, RepoListObjectAllocationsRequest, + SET_MSG_HANDLER_DB(RepoListObjectAllocationsRequest, RepoAllocationsReply, repoListObjectAllocations); - SET_MSG_HANDLER_DB(proto_id, RepoViewAllocationRequest, - RepoAllocationsReply, repoViewAllocation); - SET_MSG_HANDLER_DB(proto_id, RepoAllocationSetRequest, AckReply, + SET_MSG_HANDLER_DB(RepoViewAllocationRequest, RepoAllocationsReply, + repoViewAllocation); + SET_MSG_HANDLER_DB(RepoAllocationSetRequest, AckReply, repoAllocationSet); - SET_MSG_HANDLER_DB(proto_id, RepoAllocationSetDefaultRequest, AckReply, + SET_MSG_HANDLER_DB(RepoAllocationSetDefaultRequest, AckReply, repoAllocationSetDefault); - SET_MSG_HANDLER_DB(proto_id, RepoAllocationStatsRequest, - RepoAllocationStatsReply, repoAllocationStats); - SET_MSG_HANDLER_DB(proto_id, SchemaSearchRequest, SchemaDataReply, - schemaSearch); - SET_MSG_HANDLER_DB(proto_id, SchemaViewRequest, SchemaDataReply, - schemaView); - SET_MSG_HANDLER_DB(proto_id, SchemaDeleteRequest, AckReply, schemaDelete); - SET_MSG_HANDLER_DB(proto_id, TagSearchRequest, TagDataReply, tagSearch); - SET_MSG_HANDLER_DB(proto_id, TagListByCountRequest, TagDataReply, - tagListByCount); - SET_MSG_HANDLER_DB(proto_id, TopicListTopicsRequest, TopicDataReply, + SET_MSG_HANDLER_DB(RepoAllocationStatsRequest, RepoAllocationStatsReply, + repoAllocationStats); + SET_MSG_HANDLER_DB(SchemaSearchRequest, SchemaDataReply, schemaSearch); + SET_MSG_HANDLER_DB(SchemaViewRequest, SchemaDataReply, schemaView); + SET_MSG_HANDLER_DB(SchemaDeleteRequest, AckReply, schemaDelete); + SET_MSG_HANDLER_DB(TagSearchRequest, TagDataReply, tagSearch); + SET_MSG_HANDLER_DB(TagListByCountRequest, TagDataReply, tagListByCount); + 
SET_MSG_HANDLER_DB(TopicListTopicsRequest, TopicDataReply, topicListTopics); - SET_MSG_HANDLER_DB(proto_id, TopicViewRequest, TopicDataReply, topicView); - SET_MSG_HANDLER_DB(proto_id, TopicSearchRequest, TopicDataReply, - topicSearch); + SET_MSG_HANDLER_DB(TopicViewRequest, TopicDataReply, topicView); + SET_MSG_HANDLER_DB(TopicSearchRequest, TopicDataReply, topicSearch); } catch (TraceException &e) { DL_ERROR(m_log_context, "exception: " << e.toString()); throw; @@ -335,7 +293,7 @@ void ClientWorker::workerThread(LogContext log_context) { }(); ProtoBufMap proto_map; - uint16_t task_list_msg_type = proto_map.getMessageType(2, "TaskListRequest"); + uint16_t task_list_msg_type = proto_map.getMessageType("TaskListRequest"); DL_DEBUG(log_context, "W" << m_tid << " m_run " << m_run); @@ -368,7 +326,7 @@ void ClientWorker::workerThread(LogContext log_context) { << " [" << uid << "]"); } - if (uid.compare("anon") == 0 && msg_type > 0x1FF) { + if (uid.compare("anon") == 0 && proto_map.requiresAuth(proto_map.toString(msg_type))) { DL_WARNING(message_log_context, "W" << m_tid << " unauthorized access attempt from anon user"); @@ -376,8 +334,8 @@ void ClientWorker::workerThread(LogContext log_context) { // I know this is not great... 
allocating memory here slow // This will need to be fixed - auto nack = std::make_unique(); - nack->set_err_code(ID_AUTHN_REQUIRED); + auto nack = std::make_unique(); + nack->set_err_code(AUTHN_REQUIRED); nack->set_err_msg("Authentication required"); response_msg->setPayload(std::move(nack)); client->send(*response_msg); @@ -471,7 +429,7 @@ void ClientWorker::workerThread(LogContext log_context) { if (send_reply) { \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = std::make_unique(); \ - nack->set_err_code(ID_INTERNAL_ERROR); \ + nack->set_err_code(INTERNAL_ERROR); \ nack->set_err_msg(e.what()); \ msg_reply->setPayload(std::move(nack)); \ return msg_reply; \ @@ -483,7 +441,7 @@ void ClientWorker::workerThread(LogContext log_context) { if (send_reply) { \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = std::make_unique(); \ - nack->set_err_code(ID_INTERNAL_ERROR); \ + nack->set_err_code(INTERNAL_ERROR); \ nack->set_err_msg("Unknown exception type"); \ msg_reply->setPayload(std::move(nack)); \ return msg_reply; \ @@ -502,7 +460,7 @@ void ClientWorker::workerThread(LogContext log_context) { "unregistered msg type)."); \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = std::make_unique(); \ - nack->set_err_code(ID_BAD_REQUEST); \ + nack->set_err_code(BAD_REQUEST); \ nack->set_err_msg( \ "Message parse failed (malformed or unregistered msg type)"); \ msg_reply->setPayload(std::move(nack)); \ @@ -587,15 +545,15 @@ ClientWorker::procVersionRequest(const std::string &a_uid, (void)a_uid; DL_TRACE(log_context, "Version request"); - reply.set_release_year(DATAFED_RELEASE_YEAR); - reply.set_release_month(DATAFED_RELEASE_MONTH); - reply.set_release_day(DATAFED_RELEASE_DAY); - reply.set_release_hour(DATAFED_RELEASE_HOUR); - reply.set_release_minute(DATAFED_RELEASE_MINUTE); + reply.set_release_year( release::YEAR); + reply.set_release_month( release::MONTH); + 
reply.set_release_day( release::DAY); + reply.set_release_hour( release::HOUR); + reply.set_release_minute(release::MINUTE); - reply.set_api_major(DATAFED_COMMON_PROTOCOL_API_MAJOR); - reply.set_api_minor(DATAFED_COMMON_PROTOCOL_API_MINOR); - reply.set_api_patch(DATAFED_COMMON_PROTOCOL_API_PATCH); + reply.set_api_major(protocol::version::MAJOR); + reply.set_api_minor(protocol::version::MINOR); + reply.set_api_patch(protocol::version::PATCH); reply.set_component_major(core::version::MAJOR); reply.set_component_minor(core::version::MINOR); @@ -692,7 +650,7 @@ std::unique_ptr ClientWorker::procGenerateCredentialsRequest( char secret_key[41]; if (zmq_curve_keypair(public_key, secret_key) != 0) - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Key generation failed: " << zmq_strerror(errno)); pub_key = public_key; diff --git a/core/server/ClientWorker.hpp b/core/server/ClientWorker.hpp index cee872363..1da7af103 100644 --- a/core/server/ClientWorker.hpp +++ b/core/server/ClientWorker.hpp @@ -182,7 +182,7 @@ class ClientWorker : public nlohmann::json_schema::basic_error_handler { void schemaEnforceRequiredProperties(const nlohmann::json &a_schema); void recordCollectionDelete(const std::vector &a_ids, - Auth::TaskDataReply &a_reply, + SDMS::TaskDataReply &a_reply, LogContext log_context); void handleTaskResponse(libjson::Value &a_result, LogContext log_context); diff --git a/core/server/Config.hpp b/core/server/Config.hpp index ffc19fb92..f1170bbbe 100644 --- a/core/server/Config.hpp +++ b/core/server/Config.hpp @@ -8,7 +8,7 @@ // DataFed Common public includes #include "common/DynaLog.hpp" #include "common/ICredentials.hpp" -#include "common/SDMS.pb.h" +#include "common/envelope.pb.h" // Standard includes #include diff --git a/core/server/DatabaseAPI.cpp b/core/server/DatabaseAPI.cpp index aa0d35905..7a297b21e 100644 --- a/core/server/DatabaseAPI.cpp +++ b/core/server/DatabaseAPI.cpp @@ -4,9 +4,11 @@ // Local public includes #include "common/DynaLog.hpp" 
-#include "common/SDMS.pb.h" +#include "common/envelope.pb.h" #include "common/TraceException.hpp" #include "common/Util.hpp" +#include "common/enums/access_token_type.pb.h" +#include "common/enums/search_mode.pb.h" // Third party includes #include @@ -26,7 +28,6 @@ using namespace std; namespace SDMS { namespace Core { -using namespace SDMS::Auth; using namespace libjson; #define TRANSLATE_BEGIN() try { @@ -44,7 +45,7 @@ DatabaseAPI::DatabaseAPI(const std::string &a_db_url, : m_client(0), m_db_url(a_db_url) { m_curl = curl_easy_init(); if (!m_curl) - EXCEPT(ID_INTERNAL_ERROR, "libcurl init failed"); + EXCEPT(INTERNAL_ERROR, "libcurl init failed"); setClient(""); @@ -146,7 +147,7 @@ long DatabaseAPI::dbGet(const char *a_url_path, a_result.fromString(res_json); } catch (libjson::ParseError &e) { DL_DEBUG(log_context, "PARSE [" << res_json << "]"); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Invalid JSON returned from DB: " << e.toString()); } } @@ -155,14 +156,14 @@ long DatabaseAPI::dbGet(const char *a_url_path, return http_code; } else { if (res_json.size() && a_result.asObject().has("errorMessage")) { - EXCEPT_PARAM(ID_BAD_REQUEST, a_result.asObject().asString()); + EXCEPT_PARAM(BAD_REQUEST, a_result.asObject().asString()); } else { - EXCEPT_PARAM(ID_BAD_REQUEST, "SDMS DB service call failed. Code: " + EXCEPT_PARAM(BAD_REQUEST, "SDMS DB service call failed. Code: " << http_code << ", err: " << error); } } } else { - EXCEPT_PARAM(ID_SERVICE_ERROR, "SDMS DB interface failed. error: " + EXCEPT_PARAM(SERVICE_ERROR, "SDMS DB interface failed. 
error: " << error << ", " << curl_easy_strerror(res)); } @@ -237,7 +238,7 @@ long DatabaseAPI::dbPost(const char *a_url_path, a_result.fromString(res_json); } catch (libjson::ParseError &e) { DL_DEBUG(log_context, "PARSE [" << res_json << "]"); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Invalid JSON returned from DB: " << e.toString()); } } @@ -250,14 +251,14 @@ long DatabaseAPI::dbPost(const char *a_url_path, << (a_body ? *a_body : "") << "]"); - EXCEPT_PARAM(ID_BAD_REQUEST, a_result.asObject().asString()); + EXCEPT_PARAM(BAD_REQUEST, a_result.asObject().asString()); } else { - EXCEPT_PARAM(ID_BAD_REQUEST, "SDMS DB service call failed. Code: " + EXCEPT_PARAM(BAD_REQUEST, "SDMS DB service call failed. Code: " << http_code << ", err: " << error); } } } else { - EXCEPT_PARAM(ID_SERVICE_ERROR, "SDMS DB interface failed. error: " + EXCEPT_PARAM(SERVICE_ERROR, "SDMS DB interface failed. error: " << error << ", " << curl_easy_strerror(res)); } @@ -270,7 +271,7 @@ void DatabaseAPI::serverPing(LogContext log_context) { } void DatabaseAPI::clientAuthenticateByPassword(const std::string &a_password, - Anon::AuthStatusReply &a_reply, + SDMS::AuthStatusReply &a_reply, LogContext log_context) { Value result; @@ -279,7 +280,7 @@ void DatabaseAPI::clientAuthenticateByPassword(const std::string &a_password, } void DatabaseAPI::clientAuthenticateByToken(const std::string &a_token, - Anon::AuthStatusReply &a_reply, + SDMS::AuthStatusReply &a_reply, LogContext log_context) { Value result; @@ -287,7 +288,7 @@ void DatabaseAPI::clientAuthenticateByToken(const std::string &a_token, setAuthStatus(a_reply, result); } -void DatabaseAPI::setAuthStatus(Anon::AuthStatusReply &a_reply, +void DatabaseAPI::setAuthStatus(SDMS::AuthStatusReply &a_reply, const Value &a_result) { const Value::Object &obj = a_result.asObject(); a_reply.set_uid(obj.getString("uid")); @@ -386,7 +387,7 @@ void DatabaseAPI::userSetAccessToken(const std::string &a_acc_tok, {"access", a_acc_tok}, 
{"refresh", a_ref_tok}, {"expires_in", to_string(a_expires_in)}}; - if (token_type != SDMS::AccessTokenType::ACCESS_SENTINEL) { + if (token_type != SDMS::AccessTokenType::TOKEN_UNSPECIFIED) { params.push_back({"type", to_string(token_type)}); } if (!other_token_data.empty()) { @@ -406,7 +407,7 @@ void DatabaseAPI::userSetAccessToken(const std::string &a_access_token, } void DatabaseAPI::userSetAccessToken( - const Auth::UserSetAccessTokenRequest &a_request, Anon::AckReply &a_reply, + const SDMS::UserSetAccessTokenRequest &a_request, SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; userSetAccessToken(a_request.access(), a_request.expires_in(), @@ -451,8 +452,8 @@ void DatabaseAPI::purgeTransferRecords(size_t age) { dbGetRaw(url, result); } -void DatabaseAPI::userCreate(const Auth::UserCreateRequest &a_request, - Auth::UserDataReply &a_reply, +void DatabaseAPI::userCreate(const SDMS::UserCreateRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context) { DL_DEBUG(log_context, "DataFed user create - uid: " << a_request.uid() @@ -488,8 +489,8 @@ void DatabaseAPI::userCreate(const Auth::UserCreateRequest &a_request, setUserData(a_reply, result, log_context); } -void DatabaseAPI::userView(const Auth::UserViewRequest &a_request, - Auth::UserDataReply &a_reply, +void DatabaseAPI::userView(const SDMS::UserViewRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context) { vector> params; params.push_back({"subject", a_request.uid()}); @@ -503,7 +504,7 @@ void DatabaseAPI::userView(const Auth::UserViewRequest &a_request, } void DatabaseAPI::userUpdate(const UserUpdateRequest &a_request, - Auth::UserDataReply &a_reply, + SDMS::UserDataReply &a_reply, LogContext log_context) { Value result; @@ -522,7 +523,7 @@ void DatabaseAPI::userUpdate(const UserUpdateRequest &a_request, } void DatabaseAPI::userListAll(const UserListAllRequest &a_request, - Auth::UserDataReply &a_reply, + SDMS::UserDataReply &a_reply, LogContext log_context) { 
vector> params; if (a_request.has_offset() && a_request.has_count()) { @@ -537,7 +538,7 @@ void DatabaseAPI::userListAll(const UserListAllRequest &a_request, } void DatabaseAPI::userListCollab(const UserListCollabRequest &a_request, - Auth::UserDataReply &a_reply, + SDMS::UserDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -550,8 +551,8 @@ void DatabaseAPI::userListCollab(const UserListCollabRequest &a_request, setUserData(a_reply, result, log_context); } -void DatabaseAPI::userFindByUUIDs(const Auth::UserFindByUUIDsRequest &a_request, - Auth::UserDataReply &a_reply, +void DatabaseAPI::userFindByUUIDs(const SDMS::UserFindByUUIDsRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context) { string uuids = "["; @@ -570,8 +571,8 @@ void DatabaseAPI::userFindByUUIDs(const Auth::UserFindByUUIDsRequest &a_request, } void DatabaseAPI::userFindByNameUID( - const Auth::UserFindByNameUIDRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context) { + const SDMS::UserFindByNameUIDRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context) { Value result; vector> params; params.push_back({"name_uid", a_request.name_uid()}); @@ -585,8 +586,8 @@ void DatabaseAPI::userFindByNameUID( setUserData(a_reply, result, log_context); } -void DatabaseAPI::userGetRecentEP(const Auth::UserGetRecentEPRequest &a_request, - Auth::UserGetRecentEPReply &a_reply, +void DatabaseAPI::userGetRecentEP(const SDMS::UserGetRecentEPRequest &a_request, + SDMS::UserGetRecentEPReply &a_reply, LogContext log_context) { (void)a_request; Value result; @@ -604,8 +605,8 @@ void DatabaseAPI::userGetRecentEP(const Auth::UserGetRecentEPRequest &a_request, TRANSLATE_END(result, log_context) } -void DatabaseAPI::userSetRecentEP(const Auth::UserSetRecentEPRequest &a_request, - Anon::AckReply &a_reply, +void DatabaseAPI::userSetRecentEP(const SDMS::UserSetRecentEPRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { 
(void)a_reply; Value result; @@ -621,7 +622,7 @@ void DatabaseAPI::userSetRecentEP(const Auth::UserSetRecentEPRequest &a_request, dbGet("usr/ep/set", {{"eps", eps}}, result, log_context); } -void DatabaseAPI::setUserData(Auth::UserDataReply &a_reply, +void DatabaseAPI::setUserData(SDMS::UserDataReply &a_reply, const Value &a_result, LogContext log_context) { UserData *user; Value::ArrayConstIter k; @@ -676,8 +677,8 @@ void DatabaseAPI::setUserData(Auth::UserDataReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::projCreate(const Auth::ProjectCreateRequest &a_request, - Auth::ProjectDataReply &a_reply, +void DatabaseAPI::projCreate(const SDMS::ProjectCreateRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -715,8 +716,8 @@ void DatabaseAPI::projCreate(const Auth::ProjectCreateRequest &a_request, setProjectData(a_reply, result, log_context); } -void DatabaseAPI::projUpdate(const Auth::ProjectUpdateRequest &a_request, - Auth::ProjectDataReply &a_reply, +void DatabaseAPI::projUpdate(const SDMS::ProjectUpdateRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -756,8 +757,8 @@ void DatabaseAPI::projUpdate(const Auth::ProjectUpdateRequest &a_request, setProjectData(a_reply, result, log_context); } -void DatabaseAPI::projView(const Auth::ProjectViewRequest &a_request, - Auth::ProjectDataReply &a_reply, +void DatabaseAPI::projView(const SDMS::ProjectViewRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context) { Value result; dbGet("prj/view", {{"id", a_request.id()}}, result, log_context); @@ -765,8 +766,8 @@ void DatabaseAPI::projView(const Auth::ProjectViewRequest &a_request, setProjectData(a_reply, result, log_context); } -void DatabaseAPI::projList(const Auth::ProjectListRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::projList(const SDMS::ProjectListRequest &a_request, + 
SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -792,8 +793,8 @@ void DatabaseAPI::projList(const Auth::ProjectListRequest &a_request, setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::projGetRole(const Auth::ProjectGetRoleRequest &a_request, - Auth::ProjectGetRoleReply &a_reply, +void DatabaseAPI::projGetRole(const SDMS::ProjectGetRoleRequest &a_request, + SDMS::ProjectGetRoleReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -808,7 +809,7 @@ void DatabaseAPI::projGetRole(const Auth::ProjectGetRoleRequest &a_request, } void DatabaseAPI::projSearch(const std::string &a_query, - Auth::ProjectDataReply &a_reply, + SDMS::ProjectDataReply &a_reply, LogContext log_context) { Value result; @@ -817,7 +818,7 @@ void DatabaseAPI::projSearch(const std::string &a_query, setProjectData(a_reply, result, log_context); } -void DatabaseAPI::setProjectData(Auth::ProjectDataReply &a_reply, +void DatabaseAPI::setProjectData(SDMS::ProjectDataReply &a_reply, const Value &a_result, LogContext log_context) { ProjectData *proj; @@ -872,8 +873,8 @@ void DatabaseAPI::setProjectData(Auth::ProjectDataReply &a_reply, } void DatabaseAPI::recordListByAlloc( - const Auth::RecordListByAllocRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context) { + const SDMS::RecordListByAllocRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; params.push_back({"repo", a_request.repo()}); @@ -888,8 +889,8 @@ void DatabaseAPI::recordListByAlloc( setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::recordView(const Auth::RecordViewRequest &a_request, - Auth::RecordDataReply &a_reply, +void DatabaseAPI::recordView(const SDMS::RecordViewRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context) { Value result; @@ -898,8 +899,8 @@ void DatabaseAPI::recordView(const Auth::RecordViewRequest &a_request, setRecordData(a_reply, 
result, log_context); } -void DatabaseAPI::recordCreate(const Auth::RecordCreateRequest &a_request, - Auth::RecordDataReply &a_reply, +void DatabaseAPI::recordCreate(const SDMS::RecordCreateRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context) { Value result; nlohmann::json payload; @@ -968,8 +969,8 @@ void DatabaseAPI::recordCreate(const Auth::RecordCreateRequest &a_request, } void DatabaseAPI::recordCreateBatch( - const Auth::RecordCreateBatchRequest &a_request, - Auth::RecordDataReply &a_reply, LogContext log_context) { + const SDMS::RecordCreateBatchRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context) { Value result; dbPost("dat/create/batch", {}, &a_request.records(), result, log_context); @@ -977,8 +978,8 @@ void DatabaseAPI::recordCreateBatch( setRecordData(a_reply, result, log_context); } -void DatabaseAPI::recordUpdate(const Auth::RecordUpdateRequest &a_request, - Auth::RecordDataReply &a_reply, +void DatabaseAPI::recordUpdate(const SDMS::RecordUpdateRequest &a_request, + SDMS::RecordDataReply &a_reply, libjson::Value &result, LogContext log_context) { nlohmann::json payload; payload["id"] = a_request.id(); @@ -1051,8 +1052,8 @@ void DatabaseAPI::recordUpdate(const Auth::RecordUpdateRequest &a_request, } void DatabaseAPI::recordUpdateBatch( - const Auth::RecordUpdateBatchRequest &a_request, - Auth::RecordDataReply &a_reply, libjson::Value &result, + const SDMS::RecordUpdateBatchRequest &a_request, + SDMS::RecordDataReply &a_reply, libjson::Value &result, LogContext log_context) { // "records" field is a JSON document - send directly to DB dbPost("dat/update/batch", {}, &a_request.records(), result, log_context); @@ -1060,7 +1061,7 @@ void DatabaseAPI::recordUpdateBatch( setRecordData(a_reply, result, log_context); } -void DatabaseAPI::recordUpdateSize(const Auth::RepoDataSizeReply &a_size_rep, +void DatabaseAPI::recordUpdateSize(const SDMS::RepoDataSizeReply &a_size_rep, LogContext log_context) { 
libjson::Value result; @@ -1091,8 +1092,8 @@ void DatabaseAPI::recordUpdateSchemaError(const std::string &a_rec_id, log_context); } -void DatabaseAPI::recordExport(const Auth::RecordExportRequest &a_request, - Auth::RecordExportReply &a_reply, +void DatabaseAPI::recordExport(const SDMS::RecordExportRequest &a_request, + SDMS::RecordExportReply &a_reply, LogContext log_context) { Value result; @@ -1117,8 +1118,8 @@ void DatabaseAPI::recordExport(const Auth::RecordExportRequest &a_request, TRANSLATE_END(result, log_context) } -void DatabaseAPI::recordLock(const Auth::RecordLockRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::recordLock(const SDMS::RecordLockRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; string ids; @@ -1143,8 +1144,8 @@ void DatabaseAPI::recordLock(const Auth::RecordLockRequest &a_request, } void DatabaseAPI::recordGetDependencyGraph( - const Auth::RecordGetDependencyGraphRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context) { + const SDMS::RecordGetDependencyGraphRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; dbGet("dat/dep/graph/get", {{"id", a_request.id()}}, result, log_context); @@ -1152,7 +1153,7 @@ void DatabaseAPI::recordGetDependencyGraph( setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::setRecordData(Auth::RecordDataReply &a_reply, +void DatabaseAPI::setRecordData(SDMS::RecordDataReply &a_reply, const Value &a_result, LogContext log_context) { RecordData *rec; DependencyData *deps; @@ -1268,8 +1269,8 @@ void DatabaseAPI::setRecordData(Auth::RecordDataReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::dataPath(const Auth::DataPathRequest &a_request, - Auth::DataPathReply &a_reply, +void DatabaseAPI::dataPath(const SDMS::DataPathRequest &a_request, + SDMS::DataPathReply &a_reply, LogContext log_context) { Value result; @@ -1293,8 +1294,8 @@ void 
DatabaseAPI::dataPath(const Auth::DataPathRequest &a_request, * depending on scope. The DB relies on either tha "dataview" or "collview" * Arango search views for execution of the query. */ -void DatabaseAPI::generalSearch(const Auth::SearchRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::generalSearch(const SDMS::SearchRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; string qry_begin, qry_end, qry_filter, params; @@ -1321,8 +1322,8 @@ void DatabaseAPI::generalSearch(const Auth::SearchRequest &a_request, } void DatabaseAPI::collListPublished( - const Auth::CollListPublishedRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context) { + const SDMS::CollListPublishedRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1338,8 +1339,8 @@ void DatabaseAPI::collListPublished( setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::collCreate(const Auth::CollCreateRequest &a_request, - Auth::CollDataReply &a_reply, +void DatabaseAPI::collCreate(const SDMS::CollCreateRequest &a_request, + SDMS::CollDataReply &a_reply, LogContext log_context) { Value result; nlohmann::json payload; @@ -1376,8 +1377,8 @@ void DatabaseAPI::collCreate(const Auth::CollCreateRequest &a_request, setCollData(a_reply, result, log_context); } -void DatabaseAPI::collUpdate(const Auth::CollUpdateRequest &a_request, - Auth::CollDataReply &a_reply, +void DatabaseAPI::collUpdate(const SDMS::CollUpdateRequest &a_request, + SDMS::CollDataReply &a_reply, LogContext log_context) { Value result; nlohmann::json payload; @@ -1414,8 +1415,8 @@ void DatabaseAPI::collUpdate(const Auth::CollUpdateRequest &a_request, setCollData(a_reply, result, log_context); } -void DatabaseAPI::collView(const Auth::CollViewRequest &a_request, - Auth::CollDataReply &a_reply, +void DatabaseAPI::collView(const SDMS::CollViewRequest &a_request, + SDMS::CollDataReply &a_reply, 
LogContext log_context) { Value result; @@ -1424,8 +1425,8 @@ void DatabaseAPI::collView(const Auth::CollViewRequest &a_request, setCollData(a_reply, result, log_context); } -void DatabaseAPI::collRead(const Auth::CollReadRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::collRead(const SDMS::CollReadRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1440,8 +1441,8 @@ void DatabaseAPI::collRead(const Auth::CollReadRequest &a_request, setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::collWrite(const Auth::CollWriteRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::collWrite(const SDMS::CollWriteRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { string add_list, rem_list; vector> params; @@ -1479,8 +1480,8 @@ void DatabaseAPI::collWrite(const Auth::CollWriteRequest &a_request, setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::collMove(const Auth::CollMoveRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { +void DatabaseAPI::collMove(const SDMS::CollMoveRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; if (a_request.item_size() == 0) @@ -1504,8 +1505,8 @@ void DatabaseAPI::collMove(const Auth::CollMoveRequest &a_request, result, log_context); } -void DatabaseAPI::collGetParents(const Auth::CollGetParentsRequest &a_request, - Auth::CollPathReply &a_reply, +void DatabaseAPI::collGetParents(const SDMS::CollGetParentsRequest &a_request, + SDMS::CollPathReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1518,8 +1519,8 @@ void DatabaseAPI::collGetParents(const Auth::CollGetParentsRequest &a_request, setCollPathData(a_reply, result, log_context); } -void DatabaseAPI::collGetOffset(const Auth::CollGetOffsetRequest &a_request, - Auth::CollGetOffsetReply &a_reply, +void DatabaseAPI::collGetOffset(const SDMS::CollGetOffsetRequest 
&a_request, + SDMS::CollGetOffsetReply &a_reply, LogContext log_context) { Value result; @@ -1534,7 +1535,7 @@ void DatabaseAPI::collGetOffset(const Auth::CollGetOffsetRequest &a_request, a_reply.set_offset(result.asObject().getNumber("offset")); } -void DatabaseAPI::setCollData(Auth::CollDataReply &a_reply, +void DatabaseAPI::setCollData(SDMS::CollDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { CollData *coll; @@ -1637,7 +1638,7 @@ void DatabaseAPI::setCollPathData(CollPathReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::setListingDataReply(Auth::ListingReply &a_reply, +void DatabaseAPI::setListingDataReply(SDMS::ListingReply &a_reply, const libjson::Value &a_result, LogContext log_context) { Value::ObjectConstIter j; @@ -1730,8 +1731,8 @@ void DatabaseAPI::setListingData(ListingData *a_item, } } -void DatabaseAPI::queryList(const Auth::QueryListRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::queryList(const SDMS::QueryListRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1745,8 +1746,8 @@ void DatabaseAPI::queryList(const Auth::QueryListRequest &a_request, setListingDataReply(a_reply, result, log_context); } -void DatabaseAPI::queryCreate(const Auth::QueryCreateRequest &a_request, - Auth::QueryDataReply &a_reply, +void DatabaseAPI::queryCreate(const SDMS::QueryCreateRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context) { Value result; // vector> params; @@ -1783,8 +1784,8 @@ void DatabaseAPI::queryCreate(const Auth::QueryCreateRequest &a_request, setQueryData(a_reply, result, log_context); } -void DatabaseAPI::queryUpdate(const Auth::QueryUpdateRequest &a_request, - Auth::QueryDataReply &a_reply, +void DatabaseAPI::queryUpdate(const SDMS::QueryUpdateRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context) { Value result; nlohmann::json payload; @@ -1827,8 +1828,8 @@ void 
DatabaseAPI::queryUpdate(const Auth::QueryUpdateRequest &a_request, } // DatabaseAPI::queryDelete( const std::string & a_id ) -void DatabaseAPI::queryDelete(const Auth::QueryDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { +void DatabaseAPI::queryDelete(const SDMS::QueryDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; Value result; string ids = "["; @@ -1844,8 +1845,8 @@ void DatabaseAPI::queryDelete(const Auth::QueryDeleteRequest &a_request, dbGet("qry/delete", {{"ids", ids}}, result, log_context); } -void DatabaseAPI::queryView(const Auth::QueryViewRequest &a_request, - Auth::QueryDataReply &a_reply, +void DatabaseAPI::queryView(const SDMS::QueryViewRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context) { Value result; @@ -1854,8 +1855,8 @@ void DatabaseAPI::queryView(const Auth::QueryViewRequest &a_request, setQueryData(a_reply, result, log_context); } -void DatabaseAPI::queryExec(const Auth::QueryExecRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::queryExec(const SDMS::QueryExecRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1893,8 +1894,8 @@ void DatabaseAPI::setQueryData(QueryDataReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::aclView(const Auth::ACLViewRequest &a_request, - Auth::ACLDataReply &a_reply, LogContext log_context) { +void DatabaseAPI::aclView(const SDMS::ACLViewRequest &a_request, + SDMS::ACLDataReply &a_reply, LogContext log_context) { libjson::Value result; dbGet("acl/view", {{"id", a_request.id()}}, result, log_context); @@ -1902,8 +1903,8 @@ void DatabaseAPI::aclView(const Auth::ACLViewRequest &a_request, setACLData(a_reply, result, log_context); } -void DatabaseAPI::aclUpdate(const Auth::ACLUpdateRequest &a_request, - Auth::ACLDataReply &a_reply, +void DatabaseAPI::aclUpdate(const SDMS::ACLUpdateRequest &a_request, + SDMS::ACLDataReply 
&a_reply, LogContext log_context) { Value result; vector> params; @@ -1916,8 +1917,8 @@ void DatabaseAPI::aclUpdate(const Auth::ACLUpdateRequest &a_request, setACLData(a_reply, result, log_context); } -void DatabaseAPI::aclSharedList(const Auth::ACLSharedListRequest &a_request, - Auth::ListingReply &a_reply, +void DatabaseAPI::aclSharedList(const SDMS::ACLSharedListRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1934,8 +1935,8 @@ void DatabaseAPI::aclSharedList(const Auth::ACLSharedListRequest &a_request, } void DatabaseAPI::aclSharedListItems( - const Auth::ACLSharedListItemsRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context) { + const SDMS::ACLSharedListItemsRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -1971,8 +1972,8 @@ void DatabaseAPI::setACLData(ACLDataReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::groupCreate(const Auth::GroupCreateRequest &a_request, - Auth::GroupDataReply &a_reply, +void DatabaseAPI::groupCreate(const SDMS::GroupCreateRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context) { Value result; @@ -2000,8 +2001,8 @@ void DatabaseAPI::groupCreate(const Auth::GroupCreateRequest &a_request, setGroupData(a_reply, result, log_context); } -void DatabaseAPI::groupUpdate(const Auth::GroupUpdateRequest &a_request, - Auth::GroupDataReply &a_reply, +void DatabaseAPI::groupUpdate(const SDMS::GroupUpdateRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context) { Value result; @@ -2039,8 +2040,8 @@ void DatabaseAPI::groupUpdate(const Auth::GroupUpdateRequest &a_request, setGroupData(a_reply, result, log_context); } -void DatabaseAPI::groupDelete(const Auth::GroupDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { +void DatabaseAPI::groupDelete(const SDMS::GroupDeleteRequest &a_request, + SDMS::AckReply &a_reply, 
LogContext log_context) { (void)a_reply; Value result; @@ -2052,8 +2053,8 @@ void DatabaseAPI::groupDelete(const Auth::GroupDeleteRequest &a_request, dbGet("grp/delete", params, result, log_context); } -void DatabaseAPI::groupList(const Auth::GroupListRequest &a_request, - Auth::GroupDataReply &a_reply, +void DatabaseAPI::groupList(const SDMS::GroupListRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context) { (void)a_request; @@ -2067,8 +2068,8 @@ void DatabaseAPI::groupList(const Auth::GroupListRequest &a_request, setGroupData(a_reply, result, log_context); } -void DatabaseAPI::groupView(const Auth::GroupViewRequest &a_request, - Auth::GroupDataReply &a_reply, +void DatabaseAPI::groupView(const SDMS::GroupViewRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -2117,8 +2118,8 @@ void DatabaseAPI::setGroupData(GroupDataReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::repoList(const Auth::RepoListRequest &a_request, - Auth::RepoDataReply &a_reply, +void DatabaseAPI::repoList(const SDMS::RepoListRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context) { Value result; @@ -2158,8 +2159,8 @@ void DatabaseAPI::repoView(std::vector &a_repos, } } -void DatabaseAPI::repoView(const Auth::RepoViewRequest &a_request, - Auth::RepoDataReply &a_reply, +void DatabaseAPI::repoView(const SDMS::RepoViewRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context) { Value result; @@ -2169,8 +2170,8 @@ void DatabaseAPI::repoView(const Auth::RepoViewRequest &a_request, setRepoData(&a_reply, temp, result, log_context); } -void DatabaseAPI::repoCreate(const Auth::RepoCreateRequest &a_request, - Auth::RepoDataReply &a_reply, +void DatabaseAPI::repoCreate(const SDMS::RepoCreateRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context) { Value result; @@ -2189,13 +2190,13 @@ void DatabaseAPI::repoCreate(const Auth::RepoCreateRequest 
&a_request, }; // List of optional fields to check - add_if_present(&Auth::RepoCreateRequest::has_path, &Auth::RepoCreateRequest::path, "path"); - add_if_present(&Auth::RepoCreateRequest::has_pub_key, &Auth::RepoCreateRequest::pub_key, "pub_key"); - add_if_present(&Auth::RepoCreateRequest::has_address, &Auth::RepoCreateRequest::address, "address"); - add_if_present(&Auth::RepoCreateRequest::has_endpoint, &Auth::RepoCreateRequest::endpoint, "endpoint"); - add_if_present(&Auth::RepoCreateRequest::has_desc, &Auth::RepoCreateRequest::desc, "desc"); - add_if_present(&Auth::RepoCreateRequest::has_domain, &Auth::RepoCreateRequest::domain, "domain"); - add_if_present(&Auth::RepoCreateRequest::has_exp_path, &Auth::RepoCreateRequest::exp_path, "exp_path"); + add_if_present(&SDMS::RepoCreateRequest::has_path, &SDMS::RepoCreateRequest::path, "path"); + add_if_present(&SDMS::RepoCreateRequest::has_pub_key, &SDMS::RepoCreateRequest::pub_key, "pub_key"); + add_if_present(&SDMS::RepoCreateRequest::has_address, &SDMS::RepoCreateRequest::address, "address"); + add_if_present(&SDMS::RepoCreateRequest::has_endpoint, &SDMS::RepoCreateRequest::endpoint, "endpoint"); + add_if_present(&SDMS::RepoCreateRequest::has_desc, &SDMS::RepoCreateRequest::desc, "desc"); + add_if_present(&SDMS::RepoCreateRequest::has_domain, &SDMS::RepoCreateRequest::domain, "domain"); + add_if_present(&SDMS::RepoCreateRequest::has_exp_path, &SDMS::RepoCreateRequest::exp_path, "exp_path"); if (a_request.admin_size() > 0) { nlohmann::json admins = nlohmann::json::array(); @@ -2212,8 +2213,8 @@ void DatabaseAPI::repoCreate(const Auth::RepoCreateRequest &a_request, setRepoData(&a_reply, temp, result, log_context); } -void DatabaseAPI::repoUpdate(const Auth::RepoUpdateRequest &a_request, - Auth::RepoDataReply &a_reply, +void DatabaseAPI::repoUpdate(const SDMS::RepoUpdateRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context) { Value result; nlohmann::json payload; @@ -2260,16 +2261,16 @@ void 
DatabaseAPI::repoUpdate(const Auth::RepoUpdateRequest &a_request, setRepoData(&a_reply, temp, result, log_context); } -void DatabaseAPI::repoDelete(const Auth::RepoDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { +void DatabaseAPI::repoDelete(const SDMS::RepoDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; Value result; dbGet("repo/delete", {{"id", a_request.id()}}, result, log_context); } -void DatabaseAPI::repoCalcSize(const Auth::RepoCalcSizeRequest &a_request, - Auth::RepoCalcSizeReply &a_reply, +void DatabaseAPI::repoCalcSize(const SDMS::RepoCalcSizeRequest &a_request, + SDMS::RepoCalcSizeReply &a_reply, LogContext log_context) { Value result; @@ -2298,7 +2299,7 @@ void DatabaseAPI::repoCalcSize(const Auth::RepoCalcSizeRequest &a_request, TRANSLATE_END(result, log_context) } -void DatabaseAPI::setRepoData(Auth::RepoDataReply *a_reply, +void DatabaseAPI::setRepoData(SDMS::RepoDataReply *a_reply, std::vector &a_repos, const libjson::Value &a_result, LogContext log_context) { @@ -2371,8 +2372,8 @@ void DatabaseAPI::setRepoData(Auth::RepoDataReply *a_reply, } void DatabaseAPI::repoListAllocations( - const Auth::RepoListAllocationsRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context) { + const SDMS::RepoListAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context) { Value result; dbGet("repo/alloc/list/by_repo", {{"repo", a_request.id()}}, result, @@ -2382,8 +2383,8 @@ void DatabaseAPI::repoListAllocations( } void DatabaseAPI::repoListSubjectAllocations( - const Auth::RepoListSubjectAllocationsRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context) { + const SDMS::RepoListSubjectAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context) { Value result; vector> params; if (a_request.has_subject()) @@ -2399,8 +2400,8 @@ void DatabaseAPI::repoListSubjectAllocations( } 
void DatabaseAPI::repoListObjectAllocations( - const Auth::RepoListObjectAllocationsRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context) { + const SDMS::RepoListObjectAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context) { Value result; dbGet("repo/alloc/list/by_object", {{"object", a_request.id()}}, result, @@ -2409,7 +2410,7 @@ void DatabaseAPI::repoListObjectAllocations( setAllocData(a_reply, result, log_context); } -void DatabaseAPI::setAllocData(Auth::RepoAllocationsReply &a_reply, +void DatabaseAPI::setAllocData(SDMS::RepoAllocationsReply &a_reply, const libjson::Value &a_result, LogContext log_context) { TRANSLATE_BEGIN() @@ -2444,8 +2445,8 @@ void DatabaseAPI::setAllocData(AllocData *a_alloc, } void DatabaseAPI::repoViewAllocation( - const Auth::RepoViewAllocationRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context) { + const SDMS::RepoViewAllocationRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context) { Value result; vector> params; params.push_back({"repo", a_request.repo()}); @@ -2458,8 +2459,8 @@ void DatabaseAPI::repoViewAllocation( } void DatabaseAPI::repoAllocationStats( - const Auth::RepoAllocationStatsRequest &a_request, - Auth::RepoAllocationStatsReply &a_reply, LogContext log_context) { + const SDMS::RepoAllocationStatsRequest &a_request, + SDMS::RepoAllocationStatsReply &a_reply, LogContext log_context) { Value result; vector> params; params.push_back({"repo", a_request.repo()}); @@ -2496,7 +2497,7 @@ void DatabaseAPI::setAllocStatsData(AllocStatsData &a_stats, } void DatabaseAPI::repoAllocationSet( - const Auth::RepoAllocationSetRequest &a_request, Anon::AckReply &a_reply, + const SDMS::RepoAllocationSetRequest &a_request, SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; Value result; @@ -2510,8 +2511,8 @@ void DatabaseAPI::repoAllocationSet( } void DatabaseAPI::repoAllocationSetDefault( - const 
Auth::RepoAllocationSetDefaultRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { + const SDMS::RepoAllocationSetDefaultRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; Value result; @@ -2556,8 +2557,8 @@ void DatabaseAPI::getPerms(const GetPermsRequest &a_request, TRANSLATE_END(result, log_context) } -void DatabaseAPI::repoAuthz(const Auth::RepoAuthzRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context) { +void DatabaseAPI::repoAuthz(const SDMS::RepoAuthzRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; Value result; @@ -2571,8 +2572,8 @@ void DatabaseAPI::repoAuthz(const Auth::RepoAuthzRequest &a_request, result, log_context); } -void DatabaseAPI::topicListTopics(const Auth::TopicListTopicsRequest &a_request, - Auth::TopicDataReply &a_reply, +void DatabaseAPI::topicListTopics(const SDMS::TopicListTopicsRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -2588,8 +2589,8 @@ void DatabaseAPI::topicListTopics(const Auth::TopicListTopicsRequest &a_request, setTopicDataReply(a_reply, result, log_context); } -void DatabaseAPI::topicView(const Auth::TopicViewRequest &a_request, - Auth::TopicDataReply &a_reply, +void DatabaseAPI::topicView(const SDMS::TopicViewRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context) { Value result; @@ -2598,8 +2599,8 @@ void DatabaseAPI::topicView(const Auth::TopicViewRequest &a_request, setTopicDataReply(a_reply, result, log_context); } -void DatabaseAPI::topicSearch(const Auth::TopicSearchRequest &a_request, - Auth::TopicDataReply &a_reply, +void DatabaseAPI::topicSearch(const SDMS::TopicSearchRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context) { Value result; @@ -2608,7 +2609,7 @@ void DatabaseAPI::topicSearch(const Auth::TopicSearchRequest &a_request, setTopicDataReply(a_reply, result, log_context); } -void 
DatabaseAPI::setTopicDataReply(Auth::TopicDataReply &a_reply, +void DatabaseAPI::setTopicDataReply(SDMS::TopicDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { TRANSLATE_BEGIN() @@ -2659,7 +2660,7 @@ void DatabaseAPI::setTopicDataReply(Auth::TopicDataReply &a_reply, } void DatabaseAPI::noteCreate(const NoteCreateRequest &a_request, - Auth::NoteDataReply &a_reply, + SDMS::NoteDataReply &a_reply, LogContext log_context) { DL_DEBUG(log_context, "NoteCreate"); @@ -2677,7 +2678,7 @@ void DatabaseAPI::noteCreate(const NoteCreateRequest &a_request, } void DatabaseAPI::noteUpdate(const NoteUpdateRequest &a_request, - Auth::NoteDataReply &a_reply, + SDMS::NoteDataReply &a_reply, LogContext log_context) { DL_DEBUG(log_context, "NoteUpdate"); @@ -2697,8 +2698,8 @@ void DatabaseAPI::noteUpdate(const NoteUpdateRequest &a_request, setNoteDataReply(a_reply, result, log_context); } -void DatabaseAPI::noteCommentEdit(const Auth::NoteCommentEditRequest &a_request, - Auth::NoteDataReply &a_reply, +void DatabaseAPI::noteCommentEdit(const SDMS::NoteCommentEditRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -2711,8 +2712,8 @@ void DatabaseAPI::noteCommentEdit(const Auth::NoteCommentEditRequest &a_request, setNoteDataReply(a_reply, result, log_context); } -void DatabaseAPI::noteView(const Auth::NoteViewRequest &a_request, - Auth::NoteDataReply &a_reply, +void DatabaseAPI::noteView(const SDMS::NoteViewRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context) { Value result; @@ -2722,8 +2723,8 @@ void DatabaseAPI::noteView(const Auth::NoteViewRequest &a_request, } void DatabaseAPI::noteListBySubject( - const Auth::NoteListBySubjectRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context) { + const SDMS::NoteListBySubjectRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context) { Value result; dbGet("note/list/by_subject", {{"subject", 
a_request.subject()}}, result, @@ -2738,7 +2739,7 @@ void DatabaseAPI::notePurge(uint32_t a_age_sec, LogContext log_context) { dbGet("note/purge", {{"age_sec", to_string(a_age_sec)}}, result, log_context); } -void DatabaseAPI::setNoteDataReply(Auth::NoteDataReply &a_reply, +void DatabaseAPI::setNoteDataReply(SDMS::NoteDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { Value::ArrayConstIter i; @@ -2803,8 +2804,8 @@ void DatabaseAPI::setNoteData(NoteData *a_note, } } -void DatabaseAPI::tagSearch(const Auth::TagSearchRequest &a_request, - Auth::TagDataReply &a_reply, +void DatabaseAPI::tagSearch(const SDMS::TagSearchRequest &a_request, + SDMS::TagDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -2821,8 +2822,8 @@ void DatabaseAPI::tagSearch(const Auth::TagSearchRequest &a_request, setTagDataReply(a_reply, result, log_context); } -void DatabaseAPI::tagListByCount(const Auth::TagListByCountRequest &a_request, - Auth::TagDataReply &a_reply, +void DatabaseAPI::tagListByCount(const SDMS::TagListByCountRequest &a_request, + SDMS::TagDataReply &a_reply, LogContext log_context) { Value result; vector> params; @@ -2837,7 +2838,7 @@ void DatabaseAPI::tagListByCount(const Auth::TagListByCountRequest &a_request, setTagDataReply(a_reply, result, log_context); } -void DatabaseAPI::setTagDataReply(Auth::TagDataReply &a_reply, +void DatabaseAPI::setTagDataReply(SDMS::TagDataReply &a_reply, const Value &a_result, LogContext log_context) { Value::ObjectConstIter j; @@ -2871,8 +2872,8 @@ void DatabaseAPI::setTagData(TagData *a_tag, a_tag->set_count(a_obj.getNumber("count")); } -void DatabaseAPI::schemaSearch(const Auth::SchemaSearchRequest &a_request, - Auth::SchemaDataReply &a_reply, +void DatabaseAPI::schemaSearch(const SDMS::SchemaSearchRequest &a_request, + SDMS::SchemaDataReply &a_reply, LogContext log_context) { libjson::Value result; vector> params; @@ -2896,8 +2897,8 @@ void DatabaseAPI::schemaSearch(const 
Auth::SchemaSearchRequest &a_request, setSchemaDataReply(a_reply, result, log_context); } -void DatabaseAPI::schemaView(const Auth::SchemaViewRequest &a_request, - Auth::SchemaDataReply &a_reply, +void DatabaseAPI::schemaView(const SDMS::SchemaViewRequest &a_request, + SDMS::SchemaDataReply &a_reply, LogContext log_context) { libjson::Value result; vector> params; @@ -2910,7 +2911,7 @@ void DatabaseAPI::schemaView(const Auth::SchemaViewRequest &a_request, setSchemaDataReply(a_reply, result, log_context); } -void DatabaseAPI::schemaCreate(const Auth::SchemaCreateRequest &a_request, +void DatabaseAPI::schemaCreate(const SDMS::SchemaCreateRequest &a_request, LogContext log_context) { libjson::Value result; @@ -2926,7 +2927,7 @@ void DatabaseAPI::schemaCreate(const Auth::SchemaCreateRequest &a_request, dbPost("schema/create", {}, &body, result, log_context); } -void DatabaseAPI::schemaRevise(const Auth::SchemaReviseRequest &a_request, +void DatabaseAPI::schemaRevise(const SDMS::SchemaReviseRequest &a_request, LogContext log_context) { libjson::Value result; @@ -2952,7 +2953,7 @@ void DatabaseAPI::schemaRevise(const Auth::SchemaReviseRequest &a_request, dbPost("schema/revise", {{"id", a_request.id()}}, &body, result, log_context); } -void DatabaseAPI::schemaUpdate(const Auth::SchemaUpdateRequest &a_request, +void DatabaseAPI::schemaUpdate(const SDMS::SchemaUpdateRequest &a_request, LogContext log_context) { libjson::Value result; @@ -2981,8 +2982,8 @@ void DatabaseAPI::schemaUpdate(const Auth::SchemaUpdateRequest &a_request, dbPost("schema/update", {{"id", a_request.id()}}, &body, result, log_context); } -void DatabaseAPI::schemaDelete(const Auth::SchemaDeleteRequest &a_request, - Anon::AckReply &a_reply, +void DatabaseAPI::schemaDelete(const SDMS::SchemaDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context) { (void)a_reply; libjson::Value result; @@ -2990,7 +2991,7 @@ void DatabaseAPI::schemaDelete(const Auth::SchemaDeleteRequest &a_request, 
dbPost("schema/delete", {{"id", a_request.id()}}, 0, result, log_context); } -void DatabaseAPI::setSchemaDataReply(Auth::SchemaDataReply &a_reply, +void DatabaseAPI::setSchemaDataReply(SDMS::SchemaDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { Value::ObjectConstIter j; @@ -3078,8 +3079,8 @@ void DatabaseAPI::schemaView(const std::string &a_id, libjson::Value &a_result, dbGet("schema/view", {{"id", a_id}}, a_result, log_context); } -void DatabaseAPI::dailyMessage(const Anon::DailyMessageRequest &a_request, - Anon::DailyMessageReply &a_reply, +void DatabaseAPI::dailyMessage(const SDMS::DailyMessageRequest &a_request, + SDMS::DailyMessageReply &a_reply, LogContext log_context) { (void)a_request; // Not used libjson::Value result; @@ -3129,8 +3130,8 @@ void DatabaseAPI::taskAbort(const std::string &a_task_id, log_context); } -void DatabaseAPI::taskInitDataGet(const Auth::DataGetRequest &a_request, - Auth::DataGetReply &a_reply, +void DatabaseAPI::taskInitDataGet(const SDMS::DataGetRequest &a_request, + SDMS::DataGetReply &a_reply, libjson::Value &a_result, LogContext log_context) { nlohmann::json payload; @@ -3172,7 +3173,7 @@ void DatabaseAPI::taskInitDataGet(const Auth::DataGetRequest &a_request, setDataGetReply(a_reply, a_result, log_context); } -void DatabaseAPI::setDataGetReply(Auth::DataGetReply &a_reply, +void DatabaseAPI::setDataGetReply(SDMS::DataGetReply &a_reply, const libjson::Value &a_result, LogContext log_context) { Value::ObjectIter t; @@ -3203,8 +3204,8 @@ void DatabaseAPI::setDataGetReply(Auth::DataGetReply &a_reply, TRANSLATE_END(a_result, log_context) } -void DatabaseAPI::taskInitDataPut(const Auth::DataPutRequest &a_request, - Auth::DataPutReply &a_reply, +void DatabaseAPI::taskInitDataPut(const SDMS::DataPutRequest &a_request, + SDMS::DataPutReply &a_reply, libjson::Value &a_result, LogContext log_context) { nlohmann::json payload; @@ -3240,7 +3241,7 @@ void DatabaseAPI::taskInitDataPut(const Auth::DataPutRequest 
&a_request, setDataPutReply(a_reply, a_result, log_context); } -void DatabaseAPI::setDataPutReply(Auth::DataPutReply &a_reply, +void DatabaseAPI::setDataPutReply(SDMS::DataPutReply &a_reply, const libjson::Value &a_result, LogContext log_context) { Value::ObjectIter t; @@ -3252,7 +3253,7 @@ void DatabaseAPI::setDataPutReply(Auth::DataPutReply &a_reply, Value::ArrayConstIter j; if (!obj.has("glob_data") || obj.value().size() != 1) - EXCEPT_PARAM(ID_BAD_REQUEST, "Invalid or missing upload target"); + EXCEPT_PARAM(BAD_REQUEST, "Invalid or missing upload target"); const Value::Array &arr = obj.asArray(); const Value::Object &rec = arr.begin()->asObject(); @@ -3289,8 +3290,8 @@ void DatabaseAPI::taskInitRecordCollectionDelete( } void DatabaseAPI::taskInitRecordAllocChange( - const Auth::RecordAllocChangeRequest &a_request, - Auth::RecordAllocChangeReply &a_reply, libjson::Value &a_result, + const SDMS::RecordAllocChangeRequest &a_request, + SDMS::RecordAllocChangeReply &a_reply, libjson::Value &a_result, LogContext log_context) { nlohmann::json payload; nlohmann::json ids = nlohmann::json::array(); @@ -3329,8 +3330,8 @@ void DatabaseAPI::taskInitRecordAllocChange( } void DatabaseAPI::taskInitRecordOwnerChange( - const Auth::RecordOwnerChangeRequest &a_request, - Auth::RecordOwnerChangeReply &a_reply, libjson::Value &a_result, + const SDMS::RecordOwnerChangeRequest &a_request, + SDMS::RecordOwnerChangeReply &a_reply, libjson::Value &a_result, LogContext log_context) { nlohmann::json payload; nlohmann::json ids = nlohmann::json::array(); @@ -3374,7 +3375,7 @@ void DatabaseAPI::taskInitRecordOwnerChange( } void DatabaseAPI::taskInitProjectDelete( - const Auth::ProjectDeleteRequest &a_request, Auth::TaskDataReply &a_reply, + const SDMS::ProjectDeleteRequest &a_request, SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context) { nlohmann::json payload; nlohmann::json ids = nlohmann::json::array(); @@ -3392,8 +3393,8 @@ void 
DatabaseAPI::taskInitProjectDelete( } void DatabaseAPI::taskInitRepoAllocationCreate( - const Auth::RepoAllocationCreateRequest &a_request, - Auth::TaskDataReply &a_reply, libjson::Value &a_result, + const SDMS::RepoAllocationCreateRequest &a_request, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context) { dbGet("repo/alloc/create", {{"subject", a_request.subject()}, @@ -3406,8 +3407,8 @@ void DatabaseAPI::taskInitRepoAllocationCreate( } void DatabaseAPI::taskInitRepoAllocationDelete( - const Auth::RepoAllocationDeleteRequest &a_request, - Auth::TaskDataReply &a_reply, libjson::Value &a_result, + const SDMS::RepoAllocationDeleteRequest &a_request, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context) { dbGet("repo/alloc/delete", {{"subject", a_request.subject()}, {"repo", a_request.repo()}}, @@ -3516,7 +3517,7 @@ void DatabaseAPI::setTaskData(TaskData *a_task, * method removes tasks that are nor in READY status from the original JSON * input - this is to. */ -void DatabaseAPI::setTaskDataReply(Auth::TaskDataReply &a_reply, +void DatabaseAPI::setTaskDataReply(SDMS::TaskDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { TRANSLATE_BEGIN() @@ -3536,7 +3537,7 @@ void DatabaseAPI::setTaskDataReply(Auth::TaskDataReply &a_reply, * * JSON contains an array of task objects containing task fields. 
*/ -void DatabaseAPI::setTaskDataReplyArray(Auth::TaskDataReply &a_reply, +void DatabaseAPI::setTaskDataReplyArray(SDMS::TaskDataReply &a_reply, const libjson::Value &a_result, LogContext log_context) { TRANSLATE_BEGIN() @@ -3596,8 +3597,8 @@ void DatabaseAPI::taskFinalize(const std::string &a_task_id, bool a_succeeded, dbPost("task/finalize", params, 0, a_result, log_context); } -void DatabaseAPI::taskList(const Auth::TaskListRequest &a_request, - Auth::TaskDataReply &a_reply, +void DatabaseAPI::taskList(const SDMS::TaskListRequest &a_request, + SDMS::TaskDataReply &a_reply, LogContext log_context) { vector> params; @@ -3629,8 +3630,8 @@ void DatabaseAPI::taskList(const Auth::TaskListRequest &a_request, setTaskDataReplyArray(a_reply, result, log_context); } -void DatabaseAPI::taskView(const Auth::TaskViewRequest &a_request, - Auth::TaskDataReply &a_reply, +void DatabaseAPI::taskView(const SDMS::TaskViewRequest &a_request, + SDMS::TaskDataReply &a_reply, LogContext log_context) { libjson::Value result; @@ -3790,13 +3791,13 @@ void DatabaseAPI::metricsPurge(uint32_t a_timestamp, LogContext log_context) { log_context); } -uint32_t DatabaseAPI::parseSearchRequest(const Auth::SearchRequest &a_request, +uint32_t DatabaseAPI::parseSearchRequest(const SDMS::SearchRequest &a_request, std::string &a_qry_begin, std::string &a_qry_end, std::string &a_qry_filter, std::string &a_params, LogContext log_context) { - string view = (a_request.mode() == SM_DATA ? "dataview" : "collview"); + string view = (a_request.mode() == SDMS::SM_DATA ? 
"dataview" : "collview"); if (a_request.has_published() && a_request.published()) { a_qry_begin = string("for i in ") + view + " search i.public == true"; @@ -3860,7 +3861,7 @@ uint32_t DatabaseAPI::parseSearchRequest(const Auth::SearchRequest &a_request, } // Data-only search options - if (a_request.mode() == SM_DATA) { + if (a_request.mode() == SDMS::SM_DATA) { if (a_request.has_sch_id() > 0) { a_qry_begin += " and i.sch_id == @sch"; a_params += ",\"sch_id\":\"" + a_request.sch_id() + "\""; @@ -3937,7 +3938,7 @@ uint32_t DatabaseAPI::parseSearchRequest(const Auth::SearchRequest &a_request, string(" return distinct " "{_id:i._id,title:i.title,'desc':i['desc'],owner:i.owner,owner_" "name:name,alias:i.alias") + - (a_request.mode() == SM_DATA ? ",size:i.size,md_err:i.md_err" : "") + "}"; + (a_request.mode() == SDMS::SM_DATA ? ",size:i.size,md_err:i.md_err" : "") + "}"; a_qry_begin = a_qry_begin; a_qry_end = a_qry_end; diff --git a/core/server/DatabaseAPI.hpp b/core/server/DatabaseAPI.hpp index bf94ce90b..f0df10a51 100644 --- a/core/server/DatabaseAPI.hpp +++ b/core/server/DatabaseAPI.hpp @@ -4,9 +4,7 @@ // Local public includes #include "common/DynaLog.hpp" -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" +#include "common/envelope.pb.h" #include "common/libjson.hpp" // Third party includes @@ -38,10 +36,10 @@ class DatabaseAPI { void setClient(const std::string &a_client); void clientAuthenticateByPassword(const std::string &a_password, - Anon::AuthStatusReply &a_reply, + SDMS::AuthStatusReply &a_reply, LogContext log_context); void clientAuthenticateByToken(const std::string &a_token, - Anon::AuthStatusReply &a_reply, + SDMS::AuthStatusReply &a_reply, LogContext log_context); void clientLinkIdentity(const std::string &a_identity, LogContext log_context); @@ -72,186 +70,186 @@ class DatabaseAPI { std::vector &a_expiring_tokens, LogContext log_context); void purgeTransferRecords(size_t age); - void checkPerms(const 
Auth::CheckPermsRequest &a_request, - Auth::CheckPermsReply &a_reply, LogContext log_context); - void getPerms(const Auth::GetPermsRequest &a_request, - Auth::GetPermsReply &a_reply, LogContext log_context); - - void userSetAccessToken(const Auth::UserSetAccessTokenRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - void userCreate(const Auth::UserCreateRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userView(const Auth::UserViewRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userUpdate(const Auth::UserUpdateRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userListAll(const Auth::UserListAllRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userListCollab(const Auth::UserListCollabRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userFindByUUIDs(const Auth::UserFindByUUIDsRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userFindByNameUID(const Auth::UserFindByNameUIDRequest &a_request, - Auth::UserDataReply &a_reply, LogContext log_context); - void userGetRecentEP(const Auth::UserGetRecentEPRequest &a_request, - Auth::UserGetRecentEPReply &a_reply, + void checkPerms(const SDMS::CheckPermsRequest &a_request, + SDMS::CheckPermsReply &a_reply, LogContext log_context); + void getPerms(const SDMS::GetPermsRequest &a_request, + SDMS::GetPermsReply &a_reply, LogContext log_context); + + void userSetAccessToken(const SDMS::UserSetAccessTokenRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + void userCreate(const SDMS::UserCreateRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userView(const SDMS::UserViewRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userUpdate(const SDMS::UserUpdateRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext 
log_context); + void userListAll(const SDMS::UserListAllRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userListCollab(const SDMS::UserListCollabRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userFindByUUIDs(const SDMS::UserFindByUUIDsRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userFindByNameUID(const SDMS::UserFindByNameUIDRequest &a_request, + SDMS::UserDataReply &a_reply, LogContext log_context); + void userGetRecentEP(const SDMS::UserGetRecentEPRequest &a_request, + SDMS::UserGetRecentEPReply &a_reply, LogContext log_context); - void userSetRecentEP(const Auth::UserSetRecentEPRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - - void projCreate(const Auth::ProjectCreateRequest &a_request, - Auth::ProjectDataReply &a_reply, LogContext log_context); - void projUpdate(const Auth::ProjectUpdateRequest &a_request, - Auth::ProjectDataReply &a_reply, LogContext log_context); - void projView(const Auth::ProjectViewRequest &a_request, - Auth::ProjectDataReply &a_reply, LogContext log_context); - void projList(const Auth::ProjectListRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void projSearch(const std::string &a_query, Auth::ProjectDataReply &a_reply, + void userSetRecentEP(const SDMS::UserSetRecentEPRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + + void projCreate(const SDMS::ProjectCreateRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context); + void projUpdate(const SDMS::ProjectUpdateRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context); + void projView(const SDMS::ProjectViewRequest &a_request, + SDMS::ProjectDataReply &a_reply, LogContext log_context); + void projList(const SDMS::ProjectListRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void projSearch(const std::string &a_query, 
SDMS::ProjectDataReply &a_reply, LogContext log_context); - void projGetRole(const Auth::ProjectGetRoleRequest &a_request, - Auth::ProjectGetRoleReply &a_reply, LogContext log_context); - - void recordView(const Auth::RecordViewRequest &a_request, - Auth::RecordDataReply &a_reply, LogContext log_context); - void recordCreate(const Auth::RecordCreateRequest &a_request, - Auth::RecordDataReply &a_reply, LogContext log_context); - void recordCreateBatch(const Auth::RecordCreateBatchRequest &a_request, - Auth::RecordDataReply &a_reply, + void projGetRole(const SDMS::ProjectGetRoleRequest &a_request, + SDMS::ProjectGetRoleReply &a_reply, LogContext log_context); + + void recordView(const SDMS::RecordViewRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context); + void recordCreate(const SDMS::RecordCreateRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context); + void recordCreateBatch(const SDMS::RecordCreateBatchRequest &a_request, + SDMS::RecordDataReply &a_reply, LogContext log_context); - void recordUpdate(const Auth::RecordUpdateRequest &a_request, - Auth::RecordDataReply &a_reply, libjson::Value &result, + void recordUpdate(const SDMS::RecordUpdateRequest &a_request, + SDMS::RecordDataReply &a_reply, libjson::Value &result, LogContext log_context); - void recordUpdateBatch(const Auth::RecordUpdateBatchRequest &a_request, - Auth::RecordDataReply &a_reply, libjson::Value &result, + void recordUpdateBatch(const SDMS::RecordUpdateBatchRequest &a_request, + SDMS::RecordDataReply &a_reply, libjson::Value &result, LogContext log_context); - void recordUpdateSize(const Auth::RepoDataSizeReply &a_sizes, + void recordUpdateSize(const SDMS::RepoDataSizeReply &a_sizes, LogContext log_context); void recordUpdateSchemaError(const std::string &a_rec_id, const std::string &a_err_msg, LogContext log_context); - void recordExport(const Auth::RecordExportRequest &a_request, - Auth::RecordExportReply &a_reply, LogContext log_context); - void 
recordLock(const Auth::RecordLockRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void recordListByAlloc(const Auth::RecordListByAllocRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); + void recordExport(const SDMS::RecordExportRequest &a_request, + SDMS::RecordExportReply &a_reply, LogContext log_context); + void recordLock(const SDMS::RecordLockRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void recordListByAlloc(const SDMS::RecordListByAllocRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); void recordGetDependencyGraph( - const Auth::RecordGetDependencyGraphRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - - void generalSearch(const Auth::SearchRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - - void dataPath(const Auth::DataPathRequest &a_request, - Auth::DataPathReply &a_reply, LogContext log_context); - - void collListPublished(const Auth::CollListPublishedRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void collCreate(const Auth::CollCreateRequest &a_request, - Auth::CollDataReply &a_reply, LogContext log_context); - void collUpdate(const Auth::CollUpdateRequest &a_request, - Auth::CollDataReply &a_reply, LogContext log_context); - void collView(const Auth::CollViewRequest &a_request, - Auth::CollDataReply &a_reply, LogContext log_context); - void collRead(const Auth::CollReadRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void collWrite(const Auth::CollWriteRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void collMove(const Auth::CollMoveRequest &a_request, Anon::AckReply &a_reply, + const SDMS::RecordGetDependencyGraphRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + + void generalSearch(const SDMS::SearchRequest &a_request, + SDMS::ListingReply &a_reply, LogContext 
log_context); + + void dataPath(const SDMS::DataPathRequest &a_request, + SDMS::DataPathReply &a_reply, LogContext log_context); + + void collListPublished(const SDMS::CollListPublishedRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void collCreate(const SDMS::CollCreateRequest &a_request, + SDMS::CollDataReply &a_reply, LogContext log_context); + void collUpdate(const SDMS::CollUpdateRequest &a_request, + SDMS::CollDataReply &a_reply, LogContext log_context); + void collView(const SDMS::CollViewRequest &a_request, + SDMS::CollDataReply &a_reply, LogContext log_context); + void collRead(const SDMS::CollReadRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void collWrite(const SDMS::CollWriteRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void collMove(const SDMS::CollMoveRequest &a_request, SDMS::AckReply &a_reply, LogContext log_context); - void collGetParents(const Auth::CollGetParentsRequest &a_request, - Auth::CollPathReply &a_reply, LogContext log_context); - void collGetOffset(const Auth::CollGetOffsetRequest &a_request, - Auth::CollGetOffsetReply &a_reply, LogContext log_context); - - void queryList(const Auth::QueryListRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void queryCreate(const Auth::QueryCreateRequest &a_request, - Auth::QueryDataReply &a_reply, LogContext log_context); - void queryUpdate(const Auth::QueryUpdateRequest &a_request, - Auth::QueryDataReply &a_reply, LogContext log_context); - void queryDelete(const Auth::QueryDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - void queryView(const Auth::QueryViewRequest &a_request, - Auth::QueryDataReply &a_reply, LogContext log_context); - void queryExec(const Auth::QueryExecRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - - void aclView(const Auth::ACLViewRequest &a_request, - Auth::ACLDataReply &a_reply, LogContext 
log_context); - void aclUpdate(const Auth::ACLUpdateRequest &a_request, - Auth::ACLDataReply &a_reply, LogContext log_context); - void aclSharedList(const Auth::ACLSharedListRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - void aclSharedListItems(const Auth::ACLSharedListItemsRequest &a_request, - Auth::ListingReply &a_reply, LogContext log_context); - - void groupCreate(const Auth::GroupCreateRequest &a_request, - Auth::GroupDataReply &a_reply, LogContext log_context); - void groupUpdate(const Auth::GroupUpdateRequest &a_request, - Auth::GroupDataReply &a_reply, LogContext log_context); - void groupDelete(const Auth::GroupDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - void groupList(const Auth::GroupListRequest &a_request, - Auth::GroupDataReply &a_reply, LogContext log_context); - void groupView(const Auth::GroupViewRequest &a_request, - Auth::GroupDataReply &a_reply, LogContext log_context); + void collGetParents(const SDMS::CollGetParentsRequest &a_request, + SDMS::CollPathReply &a_reply, LogContext log_context); + void collGetOffset(const SDMS::CollGetOffsetRequest &a_request, + SDMS::CollGetOffsetReply &a_reply, LogContext log_context); + + void queryList(const SDMS::QueryListRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void queryCreate(const SDMS::QueryCreateRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context); + void queryUpdate(const SDMS::QueryUpdateRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context); + void queryDelete(const SDMS::QueryDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + void queryView(const SDMS::QueryViewRequest &a_request, + SDMS::QueryDataReply &a_reply, LogContext log_context); + void queryExec(const SDMS::QueryExecRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + + void aclView(const SDMS::ACLViewRequest &a_request, + SDMS::ACLDataReply 
&a_reply, LogContext log_context); + void aclUpdate(const SDMS::ACLUpdateRequest &a_request, + SDMS::ACLDataReply &a_reply, LogContext log_context); + void aclSharedList(const SDMS::ACLSharedListRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + void aclSharedListItems(const SDMS::ACLSharedListItemsRequest &a_request, + SDMS::ListingReply &a_reply, LogContext log_context); + + void groupCreate(const SDMS::GroupCreateRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context); + void groupUpdate(const SDMS::GroupUpdateRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context); + void groupDelete(const SDMS::GroupDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + void groupList(const SDMS::GroupListRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context); + void groupView(const SDMS::GroupViewRequest &a_request, + SDMS::GroupDataReply &a_reply, LogContext log_context); void repoList(std::vector &a_repos, LogContext log_context); - void repoList(const Auth::RepoListRequest &a_request, - Auth::RepoDataReply &a_reply, LogContext log_context); + void repoList(const SDMS::RepoListRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context); void repoView(std::vector &a_repos, LogContext log_context); - void repoView(const Auth::RepoViewRequest &a_request, - Auth::RepoDataReply &a_reply, LogContext log_context); - void repoCreate(const Auth::RepoCreateRequest &a_request, - Auth::RepoDataReply &a_reply, LogContext log_context); - void repoUpdate(const Auth::RepoUpdateRequest &a_request, - Auth::RepoDataReply &a_reply, LogContext log_context); - void repoDelete(const Auth::RepoDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - void repoCalcSize(const Auth::RepoCalcSizeRequest &a_request, - Auth::RepoCalcSizeReply &a_reply, LogContext log_context); - void repoListAllocations(const Auth::RepoListAllocationsRequest 
&a_request, - Auth::RepoAllocationsReply &a_reply, + void repoView(const SDMS::RepoViewRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context); + void repoCreate(const SDMS::RepoCreateRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context); + void repoUpdate(const SDMS::RepoUpdateRequest &a_request, + SDMS::RepoDataReply &a_reply, LogContext log_context); + void repoDelete(const SDMS::RepoDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + void repoCalcSize(const SDMS::RepoCalcSizeRequest &a_request, + SDMS::RepoCalcSizeReply &a_reply, LogContext log_context); + void repoListAllocations(const SDMS::RepoListAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context); void repoListSubjectAllocations( - const Auth::RepoListSubjectAllocationsRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context); + const SDMS::RepoListSubjectAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context); void repoListObjectAllocations( - const Auth::RepoListObjectAllocationsRequest &a_request, - Auth::RepoAllocationsReply &a_reply, LogContext log_context); - void repoViewAllocation(const Auth::RepoViewAllocationRequest &a_request, - Auth::RepoAllocationsReply &a_reply, + const SDMS::RepoListObjectAllocationsRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context); + void repoViewAllocation(const SDMS::RepoViewAllocationRequest &a_request, + SDMS::RepoAllocationsReply &a_reply, LogContext log_context); - void repoAllocationStats(const Auth::RepoAllocationStatsRequest &a_request, - Auth::RepoAllocationStatsReply &a_reply, + void repoAllocationStats(const SDMS::RepoAllocationStatsRequest &a_request, + SDMS::RepoAllocationStatsReply &a_reply, LogContext log_context); - void repoAllocationSet(const Auth::RepoAllocationSetRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); + void 
repoAllocationSet(const SDMS::RepoAllocationSetRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); void repoAllocationSetDefault( - const Auth::RepoAllocationSetDefaultRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - void repoAuthz(const Auth::RepoAuthzRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); - - void topicListTopics(const Auth::TopicListTopicsRequest &a_request, - Auth::TopicDataReply &a_reply, LogContext log_context); - void topicView(const Auth::TopicViewRequest &a_request, - Auth::TopicDataReply &a_reply, LogContext log_context); - void topicSearch(const Auth::TopicSearchRequest &a_request, - Auth::TopicDataReply &a_reply, LogContext log_context); - - void noteCreate(const Auth::NoteCreateRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context); - void noteUpdate(const Auth::NoteUpdateRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context); - void noteCommentEdit(const Auth::NoteCommentEditRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context); - void noteView(const Auth::NoteViewRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context); - void noteListBySubject(const Auth::NoteListBySubjectRequest &a_request, - Auth::NoteDataReply &a_reply, LogContext log_context); + const SDMS::RepoAllocationSetDefaultRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + void repoAuthz(const SDMS::RepoAuthzRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); + + void topicListTopics(const SDMS::TopicListTopicsRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context); + void topicView(const SDMS::TopicViewRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context); + void topicSearch(const SDMS::TopicSearchRequest &a_request, + SDMS::TopicDataReply &a_reply, LogContext log_context); + + void noteCreate(const SDMS::NoteCreateRequest &a_request, + 
SDMS::NoteDataReply &a_reply, LogContext log_context); + void noteUpdate(const SDMS::NoteUpdateRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context); + void noteCommentEdit(const SDMS::NoteCommentEditRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context); + void noteView(const SDMS::NoteViewRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context); + void noteListBySubject(const SDMS::NoteListBySubjectRequest &a_request, + SDMS::NoteDataReply &a_reply, LogContext log_context); void notePurge(uint32_t a_age_sec, LogContext log_context); void taskLoadReady(libjson::Value &a_result, LogContext log_context); @@ -261,34 +259,34 @@ class DatabaseAPI { void taskAbort(const std::string &a_task_id, const std::string &a_msg, libjson::Value &a_task_reply, LogContext log_context); - void taskInitDataGet(const Auth::DataGetRequest &a_request, - Auth::DataGetReply &a_reply, libjson::Value &a_result, + void taskInitDataGet(const SDMS::DataGetRequest &a_request, + SDMS::DataGetReply &a_reply, libjson::Value &a_result, LogContext log_context); - void taskInitDataPut(const Auth::DataPutRequest &a_request, - Auth::DataPutReply &a_reply, libjson::Value &a_result, + void taskInitDataPut(const SDMS::DataPutRequest &a_request, + SDMS::DataPutReply &a_reply, libjson::Value &a_result, LogContext log_context); void taskInitRecordCollectionDelete(const std::vector &a_ids, - Auth::TaskDataReply &a_reply, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context); void - taskInitRecordAllocChange(const Auth::RecordAllocChangeRequest &a_request, - Auth::RecordAllocChangeReply &a_reply, + taskInitRecordAllocChange(const SDMS::RecordAllocChangeRequest &a_request, + SDMS::RecordAllocChangeReply &a_reply, libjson::Value &a_result, LogContext log_context); void - taskInitRecordOwnerChange(const Auth::RecordOwnerChangeRequest &a_request, - Auth::RecordOwnerChangeReply &a_reply, + taskInitRecordOwnerChange(const 
SDMS::RecordOwnerChangeRequest &a_request, + SDMS::RecordOwnerChangeReply &a_reply, libjson::Value &a_result, LogContext log_context); void taskInitRepoAllocationCreate( - const Auth::RepoAllocationCreateRequest &a_request, - Auth::TaskDataReply &a_reply, libjson::Value &a_result, + const SDMS::RepoAllocationCreateRequest &a_request, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context); void taskInitRepoAllocationDelete( - const Auth::RepoAllocationDeleteRequest &a_request, - Auth::TaskDataReply &a_reply, libjson::Value &a_result, + const SDMS::RepoAllocationDeleteRequest &a_request, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context); - void taskInitProjectDelete(const Auth::ProjectDeleteRequest &a_request, - Auth::TaskDataReply &a_reply, + void taskInitProjectDelete(const SDMS::ProjectDeleteRequest &a_request, + SDMS::TaskDataReply &a_reply, libjson::Value &a_result, LogContext log_context); void taskStart(const std::string &a_task_id, libjson::Value &a_result, LogContext log_context); @@ -298,34 +296,34 @@ class DatabaseAPI { void taskFinalize(const std::string &a_task_id, bool a_succeeded, const std::string &a_msg, libjson::Value &a_result, LogContext log_context); - void taskList(const Auth::TaskListRequest &a_request, - Auth::TaskDataReply &a_reply, LogContext log_context); - void taskView(const Auth::TaskViewRequest &a_request, - Auth::TaskDataReply &a_reply, LogContext log_context); + void taskList(const SDMS::TaskListRequest &a_request, + SDMS::TaskDataReply &a_reply, LogContext log_context); + void taskView(const SDMS::TaskViewRequest &a_request, + SDMS::TaskDataReply &a_reply, LogContext log_context); void taskPurge(uint32_t a_age_sec, LogContext log_context); - void tagSearch(const Auth::TagSearchRequest &a_request, - Auth::TagDataReply &a_reply, LogContext log_context); - void tagListByCount(const Auth::TagListByCountRequest &a_request, - Auth::TagDataReply &a_reply, LogContext log_context); + 
void tagSearch(const SDMS::TagSearchRequest &a_request, + SDMS::TagDataReply &a_reply, LogContext log_context); + void tagListByCount(const SDMS::TagListByCountRequest &a_request, + SDMS::TagDataReply &a_reply, LogContext log_context); - void schemaSearch(const Auth::SchemaSearchRequest &a_request, - Auth::SchemaDataReply &a_reply, LogContext log_context); - void schemaView(const Auth::SchemaViewRequest &a_request, - Auth::SchemaDataReply &a_reply, LogContext log_context); + void schemaSearch(const SDMS::SchemaSearchRequest &a_request, + SDMS::SchemaDataReply &a_reply, LogContext log_context); + void schemaView(const SDMS::SchemaViewRequest &a_request, + SDMS::SchemaDataReply &a_reply, LogContext log_context); void schemaView(const std::string &a_id, libjson::Value &a_result, LogContext log_context); - void schemaCreate(const Auth::SchemaCreateRequest &a_request, + void schemaCreate(const SDMS::SchemaCreateRequest &a_request, LogContext log_context); - void schemaRevise(const Auth::SchemaReviseRequest &a_request, + void schemaRevise(const SDMS::SchemaReviseRequest &a_request, LogContext log_context); - void schemaUpdate(const Auth::SchemaUpdateRequest &a_request, + void schemaUpdate(const SDMS::SchemaUpdateRequest &a_request, LogContext log_context); - void schemaDelete(const Auth::SchemaDeleteRequest &a_request, - Anon::AckReply &a_reply, LogContext log_context); + void schemaDelete(const SDMS::SchemaDeleteRequest &a_request, + SDMS::AckReply &a_reply, LogContext log_context); - void dailyMessage(const Anon::DailyMessageRequest &a_request, - Anon::DailyMessageReply &a_reply, LogContext log_context); + void dailyMessage(const SDMS::DailyMessageRequest &a_request, + SDMS::DailyMessageReply &a_reply, LogContext log_context); void metricsUpdateMsgCounts( uint32_t a_timestamp, uint32_t a_total, @@ -342,66 +340,66 @@ class DatabaseAPI { const std::vector> &a_params, const std::string *a_body, libjson::Value &a_result, LogContext); - void 
setAuthStatus(Anon::AuthStatusReply &a_reply, + void setAuthStatus(SDMS::AuthStatusReply &a_reply, const libjson::Value &a_result); - void setUserData(Auth::UserDataReply &a_reply, const libjson::Value &a_result, + void setUserData(SDMS::UserDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setProjectData(Auth::ProjectDataReply &a_reply, + void setProjectData(SDMS::ProjectDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setRecordData(Auth::RecordDataReply &a_reply, + void setRecordData(SDMS::RecordDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setCollData(Auth::CollDataReply &a_reply, const libjson::Value &a_result, + void setCollData(SDMS::CollDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setCollPathData(Auth::CollPathReply &a_reply, + void setCollPathData(SDMS::CollPathReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setQueryData(Auth::QueryDataReply &a_reply, + void setQueryData(SDMS::QueryDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setListingDataReply(Auth::ListingReply &a_reply, + void setListingDataReply(SDMS::ListingReply &a_reply, const libjson::Value &a_result, LogContext log_context); void setListingData(ListingData *a_item, const libjson::Value::Object &a_obj, LogContext log_context); - void setGroupData(Auth::GroupDataReply &a_reply, + void setGroupData(SDMS::GroupDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setACLData(Auth::ACLDataReply &a_reply, const libjson::Value &a_result, + void setACLData(SDMS::ACLDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setAllocData(Auth::RepoAllocationsReply &a_reply, + void setAllocData(SDMS::RepoAllocationsReply &a_reply, const libjson::Value &a_result, LogContext log_context); void setAllocData(AllocData *a_alloc, const 
libjson::Value::Object &a_obj, LogContext log_context); - void setRepoData(Auth::RepoDataReply *a_reply, std::vector &a_repos, + void setRepoData(SDMS::RepoDataReply *a_reply, std::vector &a_repos, const libjson::Value &a_result, LogContext log_context); void setAllocStatsData(AllocStatsData &a_stats, const libjson::Value::Object &a_object, LogContext log_context); - void setNoteDataReply(Auth::NoteDataReply &a_reply, + void setNoteDataReply(SDMS::NoteDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); void setNoteData(NoteData *a_item, const libjson::Value::Object &a_obj, LogContext log_context); - void setTaskDataReply(Auth::TaskDataReply &a_reply, + void setTaskDataReply(SDMS::TaskDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setTaskDataReplyArray(Auth::TaskDataReply &a_reply, + void setTaskDataReplyArray(SDMS::TaskDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); void setTaskData(TaskData *a_task, const libjson::Value &a_task_json, LogContext log_context); - void setDataGetReply(Auth::DataGetReply &a_reply, + void setDataGetReply(SDMS::DataGetReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setDataPutReply(Auth::DataPutReply &a_reply, + void setDataPutReply(SDMS::DataPutReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setTagDataReply(Auth::TagDataReply &a_reply, + void setTagDataReply(SDMS::TagDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); void setTagData(TagData *a_tag, const libjson::Value::Object &a_obj, LogContext log_context); - void setTopicDataReply(Auth::TopicDataReply &a_reply, + void setTopicDataReply(SDMS::TopicDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); - void setSchemaDataReply(Auth::SchemaDataReply &a_reply, + void setSchemaDataReply(SDMS::SchemaDataReply &a_reply, const libjson::Value &a_result, LogContext log_context); void 
setSchemaData(SchemaData *a_schema, const libjson::Value::Object &a_obj); - uint32_t parseSearchRequest(const Auth::SearchRequest &a_request, + uint32_t parseSearchRequest(const SDMS::SearchRequest &a_request, std::string &a_qry_begin, std::string &a_qry_end, std::string &a_filter, std::string &a_params, LogContext log_context); diff --git a/core/server/GlobusAPI.cpp b/core/server/GlobusAPI.cpp index 7cc42b4cd..b0834206a 100644 --- a/core/server/GlobusAPI.cpp +++ b/core/server/GlobusAPI.cpp @@ -5,6 +5,7 @@ #include "common/DynaLog.hpp" #include "common/TraceException.hpp" #include "common/Util.hpp" +#include "common/envelope.pb.h" // Standard includes #include @@ -226,7 +227,7 @@ std::string GlobusAPI::getSubmissionID(const std::string &a_acc_token) { try { if (!raw_result.size()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. Code: " << code); Value result; @@ -239,7 +240,7 @@ std::string GlobusAPI::getSubmissionID(const std::string &a_acc_token) { return resp_obj.getString("value"); } catch (libjson::ParseError &e) { DL_DEBUG(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus submission API call returned invalid JSON."); } catch (TraceException &e) { DL_DEBUG(m_log_context, raw_result); @@ -247,7 +248,7 @@ std::string GlobusAPI::getSubmissionID(const std::string &a_acc_token) { throw; } catch (...) { DL_DEBUG(m_log_context, "UNEXPECTED/MISSING JSON! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus submission API call returned unexpected content"); } } @@ -292,7 +293,7 @@ string GlobusAPI::transfer( try { if (!raw_result.size()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. 
Code: " << code); Value result; @@ -307,14 +308,14 @@ string GlobusAPI::transfer( string &code = resp_obj.getString("code"); if (code.compare("Accepted") != 0) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Request not accepted (" << code << ")"); + EXCEPT_PARAM(SERVICE_ERROR, "Request not accepted (" << code << ")"); string &task_id = resp_obj.getString("task_id"); return task_id; } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus transfer API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -322,7 +323,7 @@ string GlobusAPI::transfer( throw; } catch (...) { DL_ERROR(m_log_context, "UNEXPECTED EXCEPTION " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus transfer API call returned unexpected content"); } } @@ -344,7 +345,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, try { if (!raw_result.size()) { - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. Code: " << code); } Value result; @@ -400,7 +401,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, } } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus task view API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -408,7 +409,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, throw; } catch (...) { DL_ERROR(m_log_context, "UNEXPECTED/MISSING JSON! 
" << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus task view API call returned unexpected content"); } @@ -421,7 +422,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, try { if (!raw_result.size()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. Code: " << code); Value result; @@ -434,7 +435,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, string &data_type = resp_obj.getString("DATA_TYPE"); if (data_type.compare("event_list") != 0) - EXCEPT(ID_SERVICE_ERROR, "Invalid DATA_TYPE field."); + EXCEPT(SERVICE_ERROR, "Invalid DATA_TYPE field."); vector events; @@ -457,7 +458,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, return eventsHaveErrors(events, a_status, a_err_msg); } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus task event list API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -465,7 +466,7 @@ bool GlobusAPI::checkTransferStatus(const std::string &a_task_id, throw; } catch (...) { DL_ERROR(m_log_context, "UNEXPECTED/MISSING JSON! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus task event list API call returned unexpected content"); } } @@ -480,7 +481,7 @@ void GlobusAPI::cancelTask(const std::string &a_task_id, try { if (!raw_result.size()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. 
Code: " << code); Value result; @@ -493,11 +494,11 @@ void GlobusAPI::cancelTask(const std::string &a_task_id, string &resp_code = resp_obj.getString("code"); if (resp_code != "Canceled") - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Unexpected 'code' value returned: " << resp_code); } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus cancel task API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -505,7 +506,7 @@ void GlobusAPI::cancelTask(const std::string &a_task_id, throw; } catch (...) { DL_ERROR(m_log_context, "UNEXPECTED/MISSING JSON! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus cancel task API call returned unexpected content"); } } @@ -555,7 +556,7 @@ void GlobusAPI::getEndpointInfo(const std::string &a_ep_id, Value result; try { if (!raw_result.size()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Empty response. Code: " << code); + EXCEPT_PARAM(SERVICE_ERROR, "Empty response. Code: " << code); result.fromString(raw_result); @@ -599,7 +600,7 @@ void GlobusAPI::getEndpointInfo(const std::string &a_ep_id, } } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus endpoint API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -608,7 +609,7 @@ void GlobusAPI::getEndpointInfo(const std::string &a_ep_id, throw; } catch (exception &e) { DL_ERROR(m_log_context, "UNEXPECTED/MISSING JSON! 
" << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus endpoint API call returned unexpected content"); } } @@ -625,7 +626,7 @@ void GlobusAPI::refreshAccessToken(const std::string &a_ref_tok, if (!raw_result.size()) { EXCEPT_PARAM( - ID_SERVICE_ERROR, + SERVICE_ERROR, "Globus token API call returned empty response. Code: " << code); } @@ -643,7 +644,7 @@ void GlobusAPI::refreshAccessToken(const std::string &a_ref_tok, } catch (libjson::ParseError &e) { DL_ERROR(m_log_context, "PARSE FAILED! Globus token API call returned invalid JSON"); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus token API call returned invalid JSON."); } catch (TraceException &e) { DL_ERROR(m_log_context, raw_result); @@ -651,7 +652,7 @@ void GlobusAPI::refreshAccessToken(const std::string &a_ref_tok, throw; } catch (exception &e) { DL_ERROR(m_log_context, "UNEXPECTED/MISSING JSON! " << raw_result); - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Globus token API call returned unexpected content"); } } @@ -661,9 +662,9 @@ void GlobusAPI::checkResponsCode(long a_code, if (a_code < 200 || a_code > 202) { libjson::Value::ObjectIter i = a_body.find("message"); if (i == a_body.end()) - EXCEPT_PARAM(ID_SERVICE_ERROR, "Request failed, code: " << a_code); + EXCEPT_PARAM(SERVICE_ERROR, "Request failed, code: " << a_code); else - EXCEPT_PARAM(ID_SERVICE_ERROR, + EXCEPT_PARAM(SERVICE_ERROR, "Request failed, code: " << a_code << ", reason: " << i->second.asString()); } diff --git a/core/server/GlobusAPI.hpp b/core/server/GlobusAPI.hpp index c571a1e4a..20c856411 100644 --- a/core/server/GlobusAPI.hpp +++ b/core/server/GlobusAPI.hpp @@ -7,7 +7,6 @@ // Local public includes #include "common/DynaLog.hpp" -#include "common/SDMS.pb.h" #include "common/libjson.hpp" // Third party includes diff --git a/core/server/TaskMgr.cpp b/core/server/TaskMgr.cpp index ef8003b6d..0d284e348 100644 --- a/core/server/TaskMgr.cpp +++ 
b/core/server/TaskMgr.cpp @@ -7,9 +7,9 @@ // Local public includes #include "common/DynaLog.hpp" -#include "common/SDMS.pb.h" #include "common/TraceException.hpp" #include "common/libjson.hpp" +#include "common/envelope.pb.h" // Standard includes #include diff --git a/core/server/TaskMgr.hpp b/core/server/TaskMgr.hpp index 52aacf4dd..f24c1dc52 100644 --- a/core/server/TaskMgr.hpp +++ b/core/server/TaskMgr.hpp @@ -8,8 +8,6 @@ #include "ITaskWorker.hpp" // Local public includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Auth.pb.h" #include "common/libjson.hpp" // Standard includes diff --git a/core/server/TaskWorker.cpp b/core/server/TaskWorker.cpp index 1278db017..9da9c9423 100644 --- a/core/server/TaskWorker.cpp +++ b/core/server/TaskWorker.cpp @@ -11,8 +11,12 @@ #include "common/ICommunicator.hpp" #include "common/IMessage.hpp" #include "common/MessageFactory.hpp" -#include "common/SDMS.pb.h" +#include "common/envelope.pb.h" #include "common/SocketOptions.hpp" +#include "common/enums/task_command.pb.h" +#include "common/enums/encryption.pb.h" +#include "common/enums/task_type.pb.h" +#include "common/enums/access_token_type.pb.h" // Standard includes #include "common/TraceException.hpp" @@ -40,11 +44,11 @@ TaskWorker::TaskWorker(ITaskMgr &a_mgr, uint32_t a_worker_id, m_thread = std::make_unique(&TaskWorker::workerThread, this, log_context); - m_execute[TC_RAW_DATA_TRANSFER] = &cmdRawDataTransfer; - m_execute[TC_RAW_DATA_DELETE] = &cmdRawDataDelete; - m_execute[TC_RAW_DATA_UPDATE_SIZE] = &cmdRawDataUpdateSize; - m_execute[TC_ALLOC_CREATE] = &cmdAllocCreate; - m_execute[TC_ALLOC_DELETE] = &cmdAllocDelete; + m_execute[::SDMS::TC_RAW_DATA_TRANSFER] = &cmdRawDataTransfer; + m_execute[::SDMS::TC_RAW_DATA_DELETE] = &cmdRawDataDelete; + m_execute[::SDMS::TC_RAW_DATA_UPDATE_SIZE] = &cmdRawDataUpdateSize; + m_execute[::SDMS::TC_ALLOC_CREATE] = &cmdAllocCreate; + m_execute[::SDMS::TC_ALLOC_DELETE] = &cmdAllocDelete; } TaskWorker::~TaskWorker() { @@ -101,7 +105,7 @@ 
void TaskWorker::workerThread(LogContext log_context) { if (obj.has("step")) { step = obj.asNumber(); - } else if (cmd != TC_STOP) { + } else if (cmd != SDMS::TC_STOP) { EXCEPT(1, "Reply missing step value"); } @@ -111,7 +115,7 @@ void TaskWorker::workerThread(LogContext log_context) { "TASK_ID: " << m_task->task_id << ", Step: " << step); response = m_execute[cmd](*this, params, log_context); - } else if (cmd == TC_STOP) { + } else if (cmd == SDMS::TC_STOP) { DL_DEBUG(log_context, "TASK_ID: " << m_task->task_id << ", STOP at step: " << step); m_mgr.newTasks(params, log_context); @@ -344,7 +348,7 @@ ICommunicator::Response TaskWorker::cmdRawDataDelete(TaskWorker &me, auto message_req = msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); auto del_req = - std::make_unique(); // del_req; + std::make_unique(); // del_req; for (; i < j; i++, id++) { RecordDataLocation *loc = del_req->add_loc(); loc->set_id(id->asString()); @@ -371,7 +375,7 @@ TaskWorker::cmdRawDataUpdateSize(TaskWorker &me, const Value &a_task_params, const string &repo_id = obj.getString("repo_id"); const string &path = obj.getString("repo_path"); const Value::Array &ids = obj.getArray("ids"); - auto size_req = std::make_unique(); // sz_req; + auto size_req = std::make_unique(); // sz_req; // RecordDataLocation * loc; MessageFactory msg_factory; @@ -399,7 +403,7 @@ TaskWorker::cmdRawDataUpdateSize(TaskWorker &me, const Value &a_task_params, } auto proto_msg = std::get(response.message->getPayload()); - auto size_reply = dynamic_cast(proto_msg); + auto size_reply = dynamic_cast(proto_msg); if (size_reply != 0) { if (size_reply->size_size() != (int)ids.size()) { DL_ERROR(log_context, @@ -433,7 +437,7 @@ ICommunicator::Response TaskWorker::cmdAllocCreate(TaskWorker &me, MessageFactory msg_factory; auto message = msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); - auto req = std::make_unique(); + auto req = std::make_unique(); req->set_path(path); message->setPayload(std::move(req)); @@ -455,7 
+459,7 @@ ICommunicator::Response TaskWorker::cmdAllocDelete(TaskWorker &me, MessageFactory msg_factory; auto message = msg_factory.create(MessageType::GOOGLE_PROTOCOL_BUFFER); - auto req = std::make_unique(); + auto req = std::make_unique(); req->set_path(path); message->setPayload(std::move(req)); log_context.correlation_id = @@ -653,7 +657,7 @@ TaskWorker::repoSendRecv(const string &a_repo_id, auto proto_msg = std::get(response.message->getPayload()); - auto nack = dynamic_cast(proto_msg); + auto nack = dynamic_cast(proto_msg); if (nack != 0) { ErrorCode code = nack->err_code(); string msg = diff --git a/core/server/Version.hpp.in b/core/server/Version.hpp.in index be239e85f..b186730d3 100644 --- a/core/server/Version.hpp.in +++ b/core/server/Version.hpp.in @@ -18,6 +18,22 @@ namespace SDMS { constexpr int PATCH = @DATAFED_FOXX_API_PATCH@; } } + + namespace protocol { + namespace version { + constexpr int MAJOR = @DATAFED_COMMON_PROTOCOL_API_MAJOR@; + constexpr int MINOR = @DATAFED_COMMON_PROTOCOL_API_MINOR@; + constexpr int PATCH = @DATAFED_COMMON_PROTOCOL_API_PATCH@; + } + } + + namespace release { + constexpr int YEAR = @DATAFED_RELEASE_YEAR@; + constexpr int MONTH = @DATAFED_RELEASE_MONTH@; + constexpr int DAY = @DATAFED_RELEASE_DAY@; + constexpr int HOUR = @DATAFED_RELEASE_HOUR@; + constexpr int MINUTE = @DATAFED_RELEASE_MINUTE@; + } } #endif // CORE_VERSION_HPP diff --git a/core/server/main.cpp b/core/server/main.cpp index 92cf985ee..48cca3b5b 100644 --- a/core/server/main.cpp +++ b/core/server/main.cpp @@ -7,8 +7,7 @@ #include "common/DynaLog.hpp" #include "common/TraceException.hpp" #include "common/Util.hpp" -// messaging version -#include "common/Version.pb.h" +#include "Version.hpp" // Third party includes #include @@ -105,13 +104,13 @@ int main(int a_argc, char **a_argv) { } if (opt_map.count("version")) { - cout << "Release Version: " << DATAFED_RELEASE_YEAR << "." - << DATAFED_RELEASE_MONTH << "." << DATAFED_RELEASE_DAY << "." 
- << DATAFED_RELEASE_HOUR << "." << DATAFED_RELEASE_MINUTE + cout << "Release Version: " << release::YEAR << "." + << release::MONTH << "." << release::DAY << "." + << release::HOUR << "." << release::MINUTE << std::endl; - cout << "Messaging API: " << DATAFED_COMMON_PROTOCOL_API_MAJOR << "." - << DATAFED_COMMON_PROTOCOL_API_MINOR << "." - << DATAFED_COMMON_PROTOCOL_API_PATCH << endl; + cout << "Messaging API: " << protocol::version::MAJOR << "." + << protocol::version::MINOR << "." + << protocol::version::PATCH << endl; cout << "Core Server: " << core::version::MAJOR << "." << core::version::MINOR << "." << core::version::PATCH << endl; return 0; @@ -120,7 +119,7 @@ int main(int a_argc, char **a_argv) { if (cfg_file.size()) { ifstream optfile(cfg_file.c_str()); if (!optfile.is_open()) - EXCEPT_PARAM(ID_CLIENT_ERROR, + EXCEPT_PARAM(CLIENT_ERROR, "Could not open config file: " << cfg_file); po::store(po::parse_config_file(optfile, opts, false), opt_map); diff --git a/core/server/tests/unit/test_DatabaseAPI.cpp b/core/server/tests/unit/test_DatabaseAPI.cpp index 23812af2b..1c302eed4 100644 --- a/core/server/tests/unit/test_DatabaseAPI.cpp +++ b/core/server/tests/unit/test_DatabaseAPI.cpp @@ -34,6 +34,13 @@ class DatabaseAPITestHelper : public DatabaseAPI { } }; +struct GlobalProtobufTeardown { + ~GlobalProtobufTeardown() { + // This is the teardown function that runs once at the end + google::protobuf::ShutdownProtobufLibrary(); + } +}; + struct CurlGlobalFixture { CurlGlobalFixture() { curl_global_init(CURL_GLOBAL_DEFAULT); } @@ -43,6 +50,9 @@ struct CurlGlobalFixture { // Register fixture to run once per test module BOOST_TEST_GLOBAL_CONFIGURATION(CurlGlobalFixture); +// Declare a global fixture instance +BOOST_GLOBAL_FIXTURE(GlobalProtobufTeardown); + const std::string url("https://localhost:8529"); const std::string user("bob"); const std::string pass("open_sesame"); diff --git a/docker/Dockerfile.runtime b/docker/Dockerfile.runtime index eb564dcc9..b8c8bfff1 
100644 --- a/docker/Dockerfile.runtime +++ b/docker/Dockerfile.runtime @@ -27,6 +27,7 @@ COPY ./scripts/dependency_versions.sh ${BUILD_DIR}/scripts/ RUN mkdir -p ${DATAFED_DIR} RUN mkdir -p /opt/datafed RUN mkdir -p /var/log/datafed +RUN mkdir -p /opt/datafed/logs RUN chown -R datafed:root /opt/datafed RUN chown -R datafed:root /var/log/datafed RUN chown -R datafed:root ${DATAFED_DIR} diff --git a/python/datafed_pkg/CMakeLists.txt b/python/datafed_pkg/CMakeLists.txt index 730fbed68..ac834383e 100644 --- a/python/datafed_pkg/CMakeLists.txt +++ b/python/datafed_pkg/CMakeLists.txt @@ -35,4 +35,4 @@ endforeach() add_subdirectory( datafed ) add_custom_target( pydatafed ) -add_dependencies( pydatafed pydatafed_src) +add_dependencies( pydatafed pydatafed_proto_src) diff --git a/python/datafed_pkg/datafed/CLI.py b/python/datafed_pkg/datafed/CLI.py index 949d535ae..27918efba 100644 --- a/python/datafed_pkg/datafed/CLI.py +++ b/python/datafed_pkg/datafed/CLI.py @@ -34,8 +34,6 @@ from prompt_toolkit.history import FileHistory from prompt_toolkit.auto_suggest import AutoSuggestFromHistory -# from . import SDMS_Auth_pb2 as auth -from . import Version_pb2 from . import CommandLib from . import Config from . import VERSION @@ -162,14 +160,14 @@ def run(): except _NoCommand as e: # Be nice and switch to interactive when no command given if _interactive and _first: - api_version = f"{Version_pb2.DATAFED_COMMON_PROTOCOL_API_MAJOR}." - api_version += f"{Version_pb2.DATAFED_COMMON_PROTOCOL_API_MINOR}." - api_version += f"{Version_pb2.DATAFED_COMMON_PROTOCOL_API_PATCH}" - release_version = f"{Version_pb2.DATAFED_RELEASE_YEAR}." - release_version += f"{Version_pb2.DATAFED_RELEASE_MONTH}." - release_version += f"{Version_pb2.DATAFED_RELEASE_DAY}." - release_version += f"{Version_pb2.DATAFED_RELEASE_HOUR}." - release_version += f"{Version_pb2.DATAFED_RELEASE_MINUTE}" + api_version = f"{VERSION.DATAFED_COMMON_PROTOCOL_API_MAJOR}." 
+ api_version += f"{VERSION.DATAFED_COMMON_PROTOCOL_API_MINOR}." + api_version += f"{VERSION.DATAFED_COMMON_PROTOCOL_API_PATCH}" + release_version = f"{VERSION.DATAFED_RELEASE_YEAR}." + release_version += f"{VERSION.DATAFED_RELEASE_MONTH}." + release_version += f"{VERSION.DATAFED_RELEASE_DAY}." + release_version += f"{VERSION.DATAFED_RELEASE_HOUR}." + release_version += f"{VERSION.DATAFED_RELEASE_MINUTE}" _print_msg(1, f"Welcome to DataFed CLI, version {VERSION.__version__}") _print_msg( 1, " Release, version {}".format(release_version) diff --git a/python/datafed_pkg/datafed/CMakeLists.txt b/python/datafed_pkg/datafed/CMakeLists.txt index 9e066deea..900b953cd 100644 --- a/python/datafed_pkg/datafed/CMakeLists.txt +++ b/python/datafed_pkg/datafed/CMakeLists.txt @@ -1,42 +1,60 @@ -cmake_minimum_required (VERSION 3.17.0) +cmake_minimum_required(VERSION 3.17.0) # Copy py source to build package source dir -file( GLOB SrcFiles ${CMAKE_CURRENT_SOURCE_DIR}/*.py ) +file(GLOB SrcFiles ${CMAKE_CURRENT_SOURCE_DIR}/*.py) foreach(file ${SrcFiles}) - configure_file(${file} ${CMAKE_CURRENT_BINARY_DIR} COPYONLY ) + configure_file(${file} ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) endforeach() -# Collect top-level proto files as dependencies -file( GLOB ProtoFiles ${DataFed_SOURCE_DIR}/common/proto/common/*.proto ) +# Collect proto files from the new 1-1-1 directory structure +file(GLOB_RECURSE ProtoFiles ${DataFed_SOURCE_DIR}/common/proto3/common/*.proto) # OBJECT - is needed because we don't want to compile to a binary # because we are dealing with python add_library(protobuf-target-py OBJECT ${ProtoFiles}) + protobuf_generate( LANGUAGE python TARGET protobuf-target-py - IMPORT_DIRS "${DataFed_SOURCE_DIR}/common/proto/common" + IMPORT_DIRS "${DataFed_SOURCE_DIR}/common/proto3/common" OUT_VAR protobuf-generated-files-py PROTOC_OUT_DIR "${CMAKE_CURRENT_BINARY_DIR}" - ) - -add_custom_target( pydatafed_src DEPENDS protobuf-target-py ) - -# By default this will output the proto py files 
in the CMAKE BINARY DIR -add_custom_command( TARGET pydatafed_src POST_BUILD - COMMAND sed -i -r 's:^import.*_pb2:from . \\0:' ${protobuf-generated-files-py} - COMMAND ${DataFed_SOURCE_DIR}/python/pyproto_add_msg_idx.py ${DataFed_SOURCE_DIR}/common/proto/common/SDMS_Anon.proto ${CMAKE_CURRENT_BINARY_DIR}/SDMS_Anon_pb2.py - COMMAND ${DataFed_SOURCE_DIR}/python/pyproto_add_msg_idx.py ${DataFed_SOURCE_DIR}/common/proto/common/SDMS_Auth.proto ${CMAKE_CURRENT_BINARY_DIR}/SDMS_Auth_pb2.py -) - -# Crea#te copies of the files so they show up in the source folder as well -# for the purpose of testing -add_custom_target( pydatafed_proto_src DEPENDS pydatafed_src ) -add_custom_command( TARGET pydatafed_proto_src POST_BUILD pydatafed_src - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/SDMS_Auth_pb2.py ${CMAKE_CURRENT_SOURCE_DIR}/ - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/SDMS_pb2.py ${CMAKE_CURRENT_SOURCE_DIR}/ - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/Version_pb2.py ${CMAKE_CURRENT_SOURCE_DIR}/ - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/SDMS_Anon_pb2.py ${CMAKE_CURRENT_SOURCE_DIR}/ ) +add_custom_target(pydatafed_src DEPENDS protobuf-target-py) + +# Proto subdirectories that protoc generates imports for +set(PROTO_SUBDIRS anon auth enums messages) + +# Fix imports in generated pb2 files to use relative imports within the package. +# protoc generates absolute imports like: +# from anon import ack_reply_pb2 as ... +# from enums import error_code_pb2 as ... +# import envelope_pb2 as ... +# These must become relative imports: +# from .anon import ack_reply_pb2 as ... +# from .enums import error_code_pb2 as ... +# from . import envelope_pb2 as ... 
+# Create the import fixup script +add_custom_command(TARGET pydatafed_src POST_BUILD + COMMAND sh ${DataFed_SOURCE_DIR}/python/datafed_pkg/scripts/fix_proto_imports.sh ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Rewriting protobuf imports to relative" +) + +# Copy generated files back to source tree for testing +add_custom_target(pydatafed_proto_src DEPENDS pydatafed_src) +add_custom_command(TARGET pydatafed_proto_src POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_BINARY_DIR}/envelope_pb2.py + ${CMAKE_CURRENT_SOURCE_DIR}/ + # Copy subdirectories back to source for testing + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_BINARY_DIR}/anon ${CMAKE_CURRENT_SOURCE_DIR}/anon + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_BINARY_DIR}/auth ${CMAKE_CURRENT_SOURCE_DIR}/auth + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_BINARY_DIR}/enums ${CMAKE_CURRENT_SOURCE_DIR}/enums + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_BINARY_DIR}/messages ${CMAKE_CURRENT_SOURCE_DIR}/messages +) diff --git a/python/datafed_pkg/datafed/CommandLib.py b/python/datafed_pkg/datafed/CommandLib.py index 3f4ad8098..7b9140678 100644 --- a/python/datafed_pkg/datafed/CommandLib.py +++ b/python/datafed_pkg/datafed/CommandLib.py @@ -14,11 +14,9 @@ import time import pathlib import requests -from . import SDMS_Auth_pb2 as auth -from . import SDMS_pb2 as sdms from . import MessageLib from . import Config - +from . 
import envelope_pb2 as sdms class API: """ @@ -168,7 +166,7 @@ def generateCredentials(self): ------ Exception: On communication or server error """ - msg = auth.GenerateCredentialsRequest() + msg = sdms.GenerateCredentialsRequest() return self._mapi.sendRecv(msg) @@ -236,7 +234,7 @@ def repoCreate( ------ Exception : On communication or server error """ - msg = auth.RepoCreateRequest() + msg = sdms.RepoCreateRequest() msg.id = repo_id msg.title = title msg.desc = desc @@ -260,7 +258,7 @@ def repoList(self, list_all: bool = False): By default will only list the repos associated with the user. """ - msg = auth.RepoListRequest() + msg = sdms.RepoListRequest() msg.all = list_all return self._mapi.sendRecv(msg) @@ -281,7 +279,7 @@ def repoDelete(self, repo_id): ------ Exception : On communication or server error """ - msg = auth.RepoDeleteRequest() + msg = sdms.RepoDeleteRequest() msg.id = repo_id return self._mapi.sendRecv(msg) @@ -289,7 +287,7 @@ def repoAllocationCreate(self, repo_id, subject, data_limit, rec_limit): if not repo_id.startswith("repo/"): repo_id = "repo/" + repo_id - msg = auth.RepoAllocationCreateRequest() + msg = sdms.RepoAllocationCreateRequest() msg.repo = repo_id msg.subject = subject msg.data_limit = data_limit @@ -300,14 +298,14 @@ def repoListAllocations(self, repo_id): if not repo_id.startswith("repo/"): repo_id = "repo/" + repo_id - msg = auth.RepoListAllocationsRequest() + msg = sdms.RepoListAllocationsRequest() msg.id = repo_id return self._mapi.sendRecv(msg) def repoAllocationDelete(self, repo_id, subject): if not repo_id.startswith("repo/"): repo_id = "repo/" + repo_id - msg = auth.RepoAllocationDeleteRequest() + msg = sdms.RepoAllocationDeleteRequest() msg.repo = repo_id msg.subject = subject return self._mapi.sendRecv(msg) @@ -341,7 +339,7 @@ def dataView(self, data_id, details=False, context=None): ------ Exception : On communication or server error """ - msg = auth.RecordViewRequest() + msg = sdms.RecordViewRequest() msg.id = 
self._resolve_id(data_id, context) msg.details = details @@ -436,7 +434,7 @@ def dataCreate( if metadata and metadata_file: raise Exception("Cannot specify both metadata and metadata-file options.") - msg = auth.RecordCreateRequest() + msg = sdms.RecordCreateRequest() msg.title = title msg.parent_id = self._resolve_id(parent_id, context) @@ -579,7 +577,7 @@ def dataUpdate( if metadata and metadata_file: raise Exception("Cannot specify both metadata and metadata-file options.") - msg = auth.RecordUpdateRequest() + msg = sdms.RecordUpdateRequest() msg.id = self._resolve_id(data_id, context) if title is not None: @@ -673,7 +671,7 @@ def dataDelete(self, data_id, context=None): ------ Exception : On invalid options or communication / server error """ - msg = auth.RecordDeleteRequest() + msg = sdms.RecordDeleteRequest() if isinstance(data_id, list): for i in data_id: @@ -740,7 +738,7 @@ def dataGet( # Request server to map specified IDs into a list of specific record IDs. # This accounts for download of collections. - msg = auth.DataGetRequest() + msg = sdms.DataGetRequest() msg.check = True if isinstance(item_id, str): @@ -761,7 +759,7 @@ def dataGet( if len(glob_ids) > 0: # Globus transfers - msg = auth.DataGetRequest() + msg = sdms.DataGetRequest() msg.id.extend(glob_ids) msg.path = self._resolvePathForGlobus(path, False) msg.encrypt = encrypt @@ -770,7 +768,7 @@ def dataGet( reply = self._mapi.sendRecv(msg) if reply[0].task and wait: - msg2 = auth.TaskViewRequest() + msg2 = sdms.TaskViewRequest() msg2.task_id = reply[0].task.id elapsed = 0 @@ -849,7 +847,7 @@ def dataPut( ------ Exception : On invalid options or communication / server error. 
""" - msg = auth.DataPutRequest() + msg = sdms.DataPutRequest() msg.id = self._resolve_id(data_id, context) msg.path = self._resolvePathForGlobus(path, False) msg.encrypt = encrypt @@ -859,7 +857,7 @@ def dataPut( reply = self._mapi.sendRecv(msg) if (reply[0].HasField("task")) and wait: - msg2 = auth.TaskViewRequest() + msg2 = sdms.TaskViewRequest() msg2.task_id = reply[0].task.id elapsed = 0 @@ -944,7 +942,7 @@ def dataBatchCreate(self, file, coll_id=None, context=None): payload.extend(records) - msg = auth.RecordCreateBatchRequest() + msg = sdms.RecordCreateBatchRequest() msg.records = jsonlib.dumps(payload) return self._mapi.sendRecv(msg) @@ -998,7 +996,7 @@ def dataBatchUpdate(self, file): else: payload.extend(records) - msg = auth.RecordUpdateBatchRequest() + msg = sdms.RecordUpdateBatchRequest() msg.records = jsonlib.dumps(payload) return self._mapi.sendRecv(msg) @@ -1029,7 +1027,7 @@ def collectionView(self, coll_id, context=None): ------ Exception : On invalid options or communication / server error. 
""" - msg = auth.CollViewRequest() + msg = sdms.CollViewRequest() msg.id = self._resolve_id(coll_id, context) # msg.id = self._resolve_coll_id( coll_id, context ) @@ -1083,7 +1081,7 @@ def collectionCreate( ------ Exception : On communication or server error """ - msg = auth.CollCreateRequest() + msg = sdms.CollCreateRequest() msg.title = title if alias: @@ -1149,7 +1147,7 @@ def collectionUpdate( ------ Exception : On communication or server error """ - msg = auth.CollUpdateRequest() + msg = sdms.CollUpdateRequest() msg.id = self._resolve_id(coll_id, context) if title is not None: @@ -1197,7 +1195,7 @@ def collectionDelete(self, coll_id, context=None): ------ Exception : On communication or server error """ - msg = auth.CollDeleteRequest() + msg = sdms.CollDeleteRequest() if isinstance(coll_id, list): for i in coll_id: @@ -1234,7 +1232,7 @@ def collectionItemsList(self, coll_id, offset=0, count=20, context=None): Exception : On communication or server error Exception : On invalid options """ - msg = auth.CollReadRequest() + msg = sdms.CollReadRequest() msg.count = count msg.offset = offset msg.id = self._resolve_id(coll_id, context) @@ -1276,7 +1274,7 @@ def collectionItemsUpdate(self, coll_id, add_ids=None, rem_ids=None, context=Non Exception : On communication or server error Exception : On invalid options """ - msg = auth.CollWriteRequest() + msg = sdms.CollWriteRequest() msg.id = self._resolve_id(coll_id, context) if isinstance(add_ids, list): @@ -1317,7 +1315,7 @@ def collectionGetParents(self, coll_id, inclusive=False, context=None): Exception : On communication or server error Exception : On invalid options """ - msg = auth.CollGetParentsRequest() + msg = sdms.CollGetParentsRequest() msg.id = self._resolve_id(coll_id, context) msg.inclusive = inclusive @@ -1348,7 +1346,7 @@ def queryList(self, offset=0, count=20): Exception : On communication or server error Exception : On invalid options """ - msg = auth.QueryListRequest() + msg = sdms.QueryListRequest() 
msg.offset = offset msg.count = count @@ -1371,7 +1369,7 @@ def queryView(self, query_id): ------ Exception : On communication or server error """ - msg = auth.QueryViewRequest() + msg = sdms.QueryViewRequest() msg.id = query_id return self._mapi.sendRecv(msg) @@ -1422,7 +1420,7 @@ def queryCreate( Exception : On communication or server error Exception : On invalid options """ - msg = auth.QueryCreateRequest() + msg = sdms.QueryCreateRequest() msg.title = title self._buildSearchRequest( @@ -1487,7 +1485,7 @@ def queryUpdate( Exception : On invalid options """ - msg = auth.QueryUpdateRequest() + msg = sdms.QueryUpdateRequest() msg.id = query_id if title is not None: @@ -1532,7 +1530,7 @@ def queryDelete(self, query_id): ------ Exception : On communication or server error """ - msg = auth.QueryDeleteRequest() + msg = sdms.QueryDeleteRequest() msg.id.append(query_id) return self._mapi.sendRecv(msg) @@ -1560,7 +1558,7 @@ def queryExec(self, query_id, offset=0, count=20): Exception : On communication or server error Exception : On invalid options """ - msg = auth.QueryExecRequest() + msg = sdms.QueryExecRequest() msg.id = query_id msg.offset = offset msg.count = count @@ -1612,7 +1610,7 @@ def queryDirect( Exception : On communication or server error Exception : On invalid options """ - msg = auth.SearchRequest() + msg = sdms.SearchRequest() self._buildSearchRequest( msg, @@ -1776,7 +1774,7 @@ def userListCollaborators(self, offset=0, count=20): Exception : On communication or server error Exception : On invalid options """ - msg = auth.UserListCollabRequest() + msg = sdms.UserListCollabRequest() msg.offset = offset msg.count = count @@ -1802,7 +1800,7 @@ def userListAll(self, offset=0, count=20): Exception : On communication or server error Exception : On invalid options """ - msg = auth.UserListAllRequest() + msg = sdms.UserListAllRequest() msg.offset = offset msg.count = count @@ -1826,7 +1824,7 @@ def userView(self, uid): Exception : On communication or server error 
Exception : On invalid options """ - msg = auth.UserViewRequest() + msg = sdms.UserViewRequest() msg.uid = uid return self._mapi.sendRecv(msg) @@ -1868,7 +1866,7 @@ def projectList(self, owned=True, admin=True, member=True, offset=0, count=20): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ProjectListRequest() + msg = sdms.ProjectListRequest() msg.as_owner = owned msg.as_admin = admin msg.as_member = member @@ -1895,7 +1893,7 @@ def projectView(self, project_id): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ProjectViewRequest() + msg = sdms.ProjectViewRequest() msg.id = project_id return self._mapi.sendRecv(msg) @@ -1918,7 +1916,7 @@ def projectGetRole(self, project_id): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ProjectGetRoleRequest() + msg = sdms.ProjectGetRoleRequest() msg.id = project_id reply = self._mapi.sendRecv(msg) @@ -1951,7 +1949,7 @@ def sharedList(self, inc_users=None, inc_projects=None, subject=None): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ACLSharedListRequest() + msg = sdms.ACLSharedListRequest() if inc_users is not None: msg.inc_users = inc_users @@ -1981,7 +1979,7 @@ def sharedUsersList( self ): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ACLByUserRequest() + msg = sdms.ACLByUserRequest() return self._mapi.sendRecv( msg ) @@ -1999,7 +1997,7 @@ def sharedProjectsList( self ): Exception : On communication or server error Exception : On invalid options """ - msg = auth.ACLByProjRequest() + msg = sdms.ACLByProjRequest() return self._mapi.sendRecv( msg ) ''' @@ -2031,7 +2029,7 @@ def sharedListItems(self, owner_id, context=None, offset=None, count=None): """ # TODO add support for offset & count - msg = auth.ACLSharedListItemsRequest() + msg = sdms.ACLSharedListItemsRequest() msg.owner = owner_id.lower() if 
context is not None: msg.subject = context.lower() @@ -2081,7 +2079,7 @@ def taskList( if since is not None and (time_from is not None or time_to is not None): raise Exception("Cannot specify 'since' and 'from'/'to' ranges.") - msg = auth.TaskListRequest() + msg = sdms.TaskListRequest() if time_from is not None: ts = self.strToTimestamp(time_from) @@ -2191,12 +2189,12 @@ def taskView(self, task_id=None): Exception : On invalid options """ if task_id: - msg = auth.TaskViewRequest() + msg = sdms.TaskViewRequest() msg.task_id = task_id reply = self._mapi.sendRecv(msg) else: - msg = auth.TaskListRequest() + msg = sdms.TaskListRequest() msg.offset = 0 msg.count = 1 @@ -2221,7 +2219,7 @@ def endpointListRecent(self): ------ Exception : On communication or server error """ - msg = auth.UserGetRecentEPRequest() + msg = sdms.UserGetRecentEPRequest() return self._mapi.sendRecv(msg) @@ -2305,7 +2303,7 @@ def setupCredentials(self): "Client configuration directory and/or client key files not configured" ) - msg = auth.GenerateCredentialsRequest() + msg = sdms.GenerateCredentialsRequest() reply = self._mapi.sendRecv(msg) @@ -2352,7 +2350,7 @@ def setContext(self, item_id=None): id2 = item_id if id2[0:2] == "p/": - msg = auth.ProjectViewRequest() + msg = sdms.ProjectViewRequest() msg.id = id2 else: if id2[0:2] != "u/": @@ -2364,7 +2362,7 @@ def setContext(self, item_id=None): ) id2 = "u/" + id2 - msg = auth.UserViewRequest() + msg = sdms.UserViewRequest() msg.uid = id2 # Don't need reply - just using to throw an except if id/uid is diff --git a/python/datafed_pkg/datafed/Connection.py b/python/datafed_pkg/datafed/Connection.py index d34dd1e1c..87546f528 100644 --- a/python/datafed_pkg/datafed/Connection.py +++ b/python/datafed_pkg/datafed/Connection.py @@ -6,13 +6,11 @@ # unserialized, and custom framing is generated to efficiently convey message # type, size, and a re-association context value. 
# -# The Google protobuf library does not provide a mechanism for identifying -# message types numerically (only by string), so a build-time custom tool -# (pyproto_add_msg_idx.py) is used to generate the mappings from message -# names to message index (and vice versa) and appends this information as -# dictionaries to the compiled proto files (xxxx_pb2.py). The -# registerProtocol() method then loads uses this information to create -# consistent message type framing for python send/recv methods. +# Message type identification is derived at runtime from the Envelope proto +# message's field descriptors. Each message type has a stable field number +# in the Envelope, which serves as its wire-format type ID. This replaces +# the previous build-time pyproto_add_msg_idx.py hack that assigned type +# IDs based on message declaration order within proto files. from google.protobuf.message_factory import GetMessageClass import logging @@ -69,6 +67,10 @@ def __init__( self._msg_desc_by_type = {} self._msg_desc_by_name = {} self._msg_type_by_desc = {} + self._field_by_msg_desc = {} + + self._envelope_class = None + self._envelope_desc = None self._address = "tcp://{0}:{1}".format(server_host, server_port) # init zeromq @@ -116,19 +118,65 @@ def __del__(self): self._zmq_ctxt.destroy() ## - # @brief Register a protobuf module + # @brief Register message types from the Envelope proto message + # + # This method derives message type mappings at runtime by inspecting the + # Envelope message's field descriptors. Each field in the Envelope that + # wraps a message type has a stable field number, which becomes the + # message type ID used in wire framing. This replaces the old + # registerProtocol() approach that relied on build-time generated + # _msg_name_to_type / _msg_type_to_name dicts. 
+ # + # @param envelope_module - The compiled envelope_pb2 module + # @param envelope_class_name - Name of the envelope message (default: "Envelope") + # + def registerEnvelope(self, envelope_module, envelope_class_name="Envelope"): + envelope_class = getattr(envelope_module, envelope_class_name) + envelope_desc = envelope_class.DESCRIPTOR + + # Store for envelope wrapping/unwrapping + self._envelope_class = envelope_class + self._envelope_desc = envelope_desc + + for field in envelope_desc.fields: + if field.message_type is None: + # Skip non-message fields (e.g. scalars) if any exist + continue + + msg_type = field.number + desc = field.message_type + + self._msg_desc_by_type[msg_type] = desc + self._msg_desc_by_name[desc.name] = desc + self._msg_type_by_desc[desc] = msg_type + self._field_by_msg_desc[desc] = field + + self._logger.debug( + "Registered %d message types from %s", + len(self._msg_desc_by_type), + envelope_class_name, + ) + + ## + # @brief Register a protobuf module (DEPRECATED - use registerEnvelope) # # This method registers an imported protobuf module (_pb2 file) for use # with the Connection class. Registration is required for proper message # framing and serialization. # + # This relies on build-time generated _msg_name_to_type dicts appended + # to _pb2 files by pyproto_add_msg_idx.py. Prefer registerEnvelope() + # which derives mappings from envelope field numbers at runtime. 
+ # # @param msg_module - Protobuf module (imported *_pb2 module) # def registerProtocol(self, msg_module): - # Message descriptors are stored by name created by protobuf compiler - # A custom post-proc tool generates and appends _msg_name_to_type with - # defined DataFed-sepcific numer message types - + import warnings + warnings.warn( + "registerProtocol() is deprecated, use registerEnvelope() instead", + DeprecationWarning, + stacklevel=2, + ) for name, desc in sorted(msg_module.DESCRIPTOR.message_types_by_name.items()): msg_t = msg_module._msg_name_to_type[name] self._msg_desc_by_type[msg_t] = desc @@ -138,15 +186,15 @@ def registerProtocol(self, msg_module): ## # @brief Receive a message # - # Receive a protobuf message with timeout. This method automatically - # parses and creates a new protobuf message class based on received - # framing. The new message object, the message name (defined in the - # associated proto file), and re-association context are returned as - # a tuple. On timeout, (None,None,None) is returned. + # Receive a protobuf message with timeout. The wire payload is an + # Envelope message; this method deserializes the Envelope and extracts + # the inner message via the oneof payload field. The inner message + # object, its name, and re-association context are returned as a tuple. + # On timeout, (None, None, None) is returned. # # @param timeout - Timeout in milliseconds - # @return Tuple of message, message type, and re-association context - # @retval (object,str,int) or (None,None,None) on timeout + # @return Tuple of message, message name, and re-association context + # @retval (object, str, int) or (None, None, None) on timeout # @exception Exception: if unregistered message type is received. 
# def recv(self, a_timeout=1000): @@ -180,38 +228,46 @@ def recv(self, a_timeout=1000): # client self._socket.recv_string(0) - # receive custom frame header and unpack + # Receive frame: 8 bytes = uint32 size + uint16 msg_type + uint16 context frame_data = self._socket.recv(0) - frame_values = struct.unpack(">LBBH", frame_data) - msg_type = (frame_values[1] << 8) | frame_values[2] - - # find message descriptor based on type (descriptor index) + frame_values = struct.unpack(">LHH", frame_data) + body_size = frame_values[0] + msg_type = frame_values[1] + ctxt = frame_values[2] - if not (msg_type in self._msg_desc_by_type): + if msg_type not in self._msg_desc_by_type: raise Exception( "received unregistered message type: {}".format(msg_type) ) - desc = self._msg_desc_by_type[msg_type] + data = self._socket.recv(0) - if frame_values[0] > 0: - # Create message by parsing content - data = self._socket.recv(0) - reply = GetMessageClass(desc)() - reply.ParseFromString(data) + if body_size > 0: + # Deserialize as Envelope + envelope = self._envelope_class() + envelope.ParseFromString(data) + + # Extract inner message from the oneof + payload_field = envelope.WhichOneof("payload") + if payload_field is None: + raise Exception("Received Envelope with no payload set") + reply = getattr(envelope, payload_field) else: - # No content, just create message instance - data = self._socket.recv(0) + # Zero-size body: create empty message instance from type + desc = self._msg_desc_by_type[msg_type] reply = GetMessageClass(desc)() - return reply, desc.name, frame_values[3] + return reply, reply.DESCRIPTOR.name, ctxt else: return None, None, None ## # @brief Send a message # - # Serializes and sends framing and message payload over connection. + # Wraps the inner message in an Envelope, serializes it, and sends + # framing and payload over the connection. The frame header carries the + # message type (Envelope field number) for efficient routing on the + # server side. 
# # @param message - The protobuf message object to be sent # @param ctxt - Reply re-association value (int) @@ -219,9 +275,15 @@ def recv(self, a_timeout=1000): # def send(self, message, ctxt): # Find msg type by descriptor look-up - if not (message.DESCRIPTOR in self._msg_type_by_desc): + if message.DESCRIPTOR not in self._msg_type_by_desc: raise Exception("Attempt to send unregistered message type.") + msg_type = self._msg_type_by_desc[message.DESCRIPTOR] + field = self._field_by_msg_desc[message.DESCRIPTOR] + + # Wrap inner message in Envelope + envelope = self._envelope_class() + getattr(envelope, field.name).CopyFrom(message) # Initial Null frame self._socket.send_string("BEGIN_DATAFED", zmq.SNDMORE) @@ -235,12 +297,12 @@ def send(self, message, ctxt): self._socket.send_string(self._pub_key, zmq.SNDMORE) self._socket.send_string("no_user", zmq.SNDMORE) - # Serialize - data = message.SerializeToString() + # Serialize the Envelope (not the inner message) + data = envelope.SerializeToString() data_sz = len(data) - # Build the message frame, to match C-struct MessageFrame - frame = struct.pack(">LBBH", data_sz, msg_type >> 8, msg_type & 0xFF, ctxt) + # Build the message frame: uint32 size + uint16 msg_type + uint16 context + frame = struct.pack(">LHH", data_sz, msg_type, ctxt) if data_sz > 0: # Send frame and payload diff --git a/python/datafed_pkg/datafed/MessageLib.py b/python/datafed_pkg/datafed/MessageLib.py index 62354c0eb..f0964e7d7 100644 --- a/python/datafed_pkg/datafed/MessageLib.py +++ b/python/datafed_pkg/datafed/MessageLib.py @@ -12,9 +12,7 @@ import zmq -from . import Version_pb2 -from . import SDMS_Anon_pb2 as anon -from . import SDMS_Auth_pb2 as auth +from . import envelope_pb2 as proto from . import Connection from . 
import VERSION @@ -166,8 +164,7 @@ def __init__( server_host, server_port, _server_pub_key, _client_pub_key, _client_priv_key ) - self._conn.registerProtocol(anon) - self._conn.registerProtocol(auth) + self._conn.registerEnvelope(proto) # Make a request to pypi package_name = "datafed" # Replace with the package name you want to check @@ -191,14 +188,14 @@ def __init__( self.new_client_avail = latest_version_on_pypi # Check for compatible protocol versions - reply, mt = self.sendRecv(anon.VersionRequest(), 10000) + reply, mt = self.sendRecv(proto.VersionRequest(), 10000) if reply is None: raise Exception( "Timeout waiting for server connection. Make sure" "the right ports are open." ) - if reply.api_major != Version_pb2.DATAFED_COMMON_PROTOCOL_API_MAJOR: + if reply.api_major != VERSION.DATAFED_COMMON_PROTOCOL_API_MAJOR: error_msg = ( "Incompatible server api detected {}.{}.{}, you are running " "{}.{}.{} consider " @@ -206,9 +203,9 @@ def __init__( reply.api_major, reply.api_minor, reply.api_patch, - Version_pb2.DATAFED_COMMON_PROTOCOL_API_MAJOR, - Version_pb2.DATAFED_COMMON_PROTOCOL_API_MINOR, - Version_pb2.DATAFED_COMMON_PROTOCOL_API_PATCH, + VERSION.DATAFED_COMMON_PROTOCOL_API_MAJOR, + VERSION.DATAFED_COMMON_PROTOCOL_API_MINOR, + VERSION.DATAFED_COMMON_PROTOCOL_API_PATCH, ) ) if self.new_client_avail: @@ -223,7 +220,7 @@ def __init__( self.manualAuthByToken(client_token) else: # Check if server authenticated based on keys - reply, mt = self.sendRecv(anon.GetAuthStatusRequest(), 10000) + reply, mt = self.sendRecv(proto.GetAuthStatusRequest(), 10000) self._auth = reply.auth self._uid = reply.uid @@ -263,7 +260,7 @@ def getAuthStatus(self): # @exception Exception: On communication timeout or authentication failure. 
# def manualAuthByPassword(self, uid, password): - msg = anon.AuthenticateByPasswordRequest() + msg = proto.AuthenticateByPasswordRequest() msg.uid = uid msg.password = password a, b = self.sendRecv(msg) @@ -272,7 +269,7 @@ def manualAuthByPassword(self, uid, password): self._conn.reset() # Test auth status - reply, mt = self.sendRecv(anon.GetAuthStatusRequest()) + reply, mt = self.sendRecv(proto.GetAuthStatusRequest()) if not reply.auth: raise Exception("Password authentication failed.") @@ -280,7 +277,7 @@ def manualAuthByPassword(self, uid, password): self._uid = reply.uid def manualAuthByToken(self, token): - msg = anon.AuthenticateByTokenRequest() + msg = proto.AuthenticateByTokenRequest() msg.token = token self.sendRecv(msg) @@ -288,7 +285,7 @@ def manualAuthByToken(self, token): self._conn.reset() # Test auth status - reply, mt = self.sendRecv(anon.GetAuthStatusRequest()) + reply, mt = self.sendRecv(proto.GetAuthStatusRequest()) if not reply.auth: raise Exception("Token authentication failed") @@ -332,7 +329,7 @@ def getDefaultTimeout(self): def getDailyMessage(self): # Get daily message, if set - reply, mt = self.sendRecv(anon.DailyMessageRequest(), 10000) + reply, mt = self.sendRecv(proto.DailyMessageRequest(), 10000) if reply is None: raise Exception("Timeout waiting for server connection.") diff --git a/python/datafed_pkg/datafed/VERSION.py.in b/python/datafed_pkg/datafed/VERSION.py.in index fc9c6a3b3..03ae7c5f5 100644 --- a/python/datafed_pkg/datafed/VERSION.py.in +++ b/python/datafed_pkg/datafed/VERSION.py.in @@ -1 +1,9 @@ __version__="@DATAFED_PYTHON_CLIENT_MAJOR@.@DATAFED_PYTHON_CLIENT_MINOR@.@DATAFED_PYTHON_CLIENT_PATCH@@DATAFED_PYTHON_CLIENT_RELEASE_TYPE@@DATAFED_PYTHON_CLIENT_PRE_RELEASE_IDENTIFER@" +DATAFED_COMMON_PROTOCOL_API_MAJOR=@DATAFED_COMMON_PROTOCOL_API_MAJOR@ +DATAFED_COMMON_PROTOCOL_API_MINOR=@DATAFED_COMMON_PROTOCOL_API_MINOR@ +DATAFED_COMMON_PROTOCOL_API_PATCH=@DATAFED_COMMON_PROTOCOL_API_PATCH@ 
+DATAFED_RELEASE_YEAR=@DATAFED_RELEASE_YEAR@ +DATAFED_RELEASE_MONTH=@DATAFED_RELEASE_MONTH@ +DATAFED_RELEASE_DAY=@DATAFED_RELEASE_DAY@ +DATAFED_RELEASE_HOUR=@DATAFED_RELEASE_HOUR@ +DATAFED_RELEASE_MINUTE=@DATAFED_RELEASE_MINUTE@ diff --git a/python/datafed_pkg/scripts/fix_proto_imports.sh b/python/datafed_pkg/scripts/fix_proto_imports.sh new file mode 100755 index 000000000..1894efa0a --- /dev/null +++ b/python/datafed_pkg/scripts/fix_proto_imports.sh @@ -0,0 +1,111 @@ +#!/bin/sh +set -e + +# What this script does +# +# protoc --python_out +# +# generates _pb2.py files with absolute imports based on the proto import +# paths. For example, if envelope.proto imports anon/auth_by_token.proto, the +# generated envelope_pb2.py will contain: +# +# python from anon import auth_by_token_pb2 +# + +# This works if you run Python from the exact output directory, but breaks when +# the generated code is consumed as a Python package (which is how DataFed uses +# it). Python's package system requires relative imports for intra-package +# references: +# +# File at package level +# +# from .anon import auth_by_token_pb2 +# +# File at root +# +# from ..anon import auth_by_token_pb2 +# +# file in a subdirectory protoc +# has no option to emit relative imports. This is a well-known, long-standing +# limitation (protocolbuffers/protobuf#1491). The script does three things: +# +# 1. Rewrites imports to be relative. It finds every _pb2.py file, determines +# whether it lives at the package root or in a subdirectory (e.g., anon/, +# auth/), and rewrites bare absolute imports (from anon import ...) to the +# correct relative form (.anon for root-level files, ..anon for files one level +# deep). +# 2. Creates __init__.py files in each subdirectory (anon/, auth/, enums/, +# messages/) so Python recognizes them as subpackages. Appends re-exports to +# envelope_pb2.py for backward compatibility. 
The existing Python client +# (Connection.py) uses getattr(envelope_module, ClassName) to dynamically look +# up message classes by name on the envelope module. +# +# Under the old single-file +# proto2 layout, all message classes lived directly in envelope_pb2.py. Now +# that messages are split across subpackages, this dynamic lookup would break. +# The wildcard re-exports (from .anon.auth_by_token_pb2 import *, etc.) restore +# the flat namespace on envelope_pb2 so existing code continues to work without +# modification. + +PROTO_DIR="$1" +ROOT_DIR="${2:-$1}" + +if [ -z "$PROTO_DIR" ]; then + echo "Usage: fix_proto_imports.sh [root_dir]" + echo " proto_output_dir: directory to find and fix _pb2.py files" + echo " root_dir: package root for computing relative depth (defaults to proto_output_dir)" + exit 1 +fi + +find "$PROTO_DIR" -name '*_pb2.py' | while read f; do + relpath=$(realpath --relative-to="$ROOT_DIR" "$f") + case "$relpath" in + */*) + sed -i \ + -e 's:^from anon import:from ..anon import:g' \ + -e 's:^from anon\.:from ..anon.:g' \ + -e 's:^from auth import:from ..auth import:g' \ + -e 's:^from auth\.:from ..auth.:g' \ + -e 's:^from enums import:from ..enums import:g' \ + -e 's:^from enums\.:from ..enums.:g' \ + -e 's:^from messages import:from ..messages import:g' \ + -e 's:^from messages\.:from ..messages.:g' \ + -e 's:^import \(.*_pb2\):from . import \1:g' \ + "$f" + ;; + *) + sed -i \ + -e 's:^from anon import:from .anon import:g' \ + -e 's:^from anon\.:from .anon.:g' \ + -e 's:^from auth import:from .auth import:g' \ + -e 's:^from auth\.:from .auth.:g' \ + -e 's:^from enums import:from .enums import:g' \ + -e 's:^from enums\.:from .enums.:g' \ + -e 's:^from messages import:from .messages import:g' \ + -e 's:^from messages\.:from .messages.:g' \ + -e 's:^import \(.*_pb2\):from . 
import \1:g' \ + "$f" + ;; + esac +done + +for subdir in anon auth enums messages; do + if [ -d "$ROOT_DIR/$subdir" ]; then + touch "$ROOT_DIR/$subdir/__init__.py" + fi +done + +# Append re-exports to envelope_pb2.py for backward compatibility +# Connection.py uses getattr(envelope_module, class_name) for dynamic dispatch +echo "" >>"$ROOT_DIR/envelope_pb2.py" +echo "# Re-export all message and enum classes for dynamic lookup" >>"$ROOT_DIR/envelope_pb2.py" + +for subdir in anon auth enums messages; do + if [ -d "$ROOT_DIR/$subdir" ]; then + for f in "$ROOT_DIR/$subdir"/*_pb2.py; do + [ -f "$f" ] || continue + module=$(basename "$f" .py) + echo "from .$subdir.$module import *" >>"$ROOT_DIR/envelope_pb2.py" + done + fi +done diff --git a/python/datafed_pkg/setup.py b/python/datafed_pkg/setup.py index 21abefdcc..55ebd76d9 100644 --- a/python/datafed_pkg/setup.py +++ b/python/datafed_pkg/setup.py @@ -22,6 +22,13 @@ long_description_content_type="text/markdown", url="https://github.com/ORNL/DataFed", packages=setuptools.find_packages(), + package_data={ + "datafed": ["*.py"], + "datafed.anon": ["*.py"], + "datafed.auth": ["*.py"], + "datafed.enums": ["*.py"], + "datafed.messages": ["*.py"], + }, setup_requires=["setuptools"], install_requires=install_requires, entry_points={"console_scripts": ["datafed = datafed.CLI:run"]}, @@ -31,3 +38,5 @@ "Operating System :: OS Independent", ], ) + + diff --git a/python/datafed_pkg/test/security.py b/python/datafed_pkg/test/security.py index d9b479c1a..a8e42c027 100755 --- a/python/datafed_pkg/test/security.py +++ b/python/datafed_pkg/test/security.py @@ -2,7 +2,7 @@ import getpass import datafed.CommandLib -import datafed.SDMS_Auth_pb2 as auth +import datafed.envelope_pb2 as sdms opts = {} @@ -15,7 +15,7 @@ api.loginByPassword(uid, password) -msg = auth.UserCreateRequest() +msg = sdms.UserCreateRequest() msg.uid = "newuser" msg.password = "temptemp" msg.name = "New User" diff --git a/python/pyproto_add_msg_idx.py 
b/python/pyproto_add_msg_idx.py deleted file mode 100755 index 233f83b33..000000000 --- a/python/pyproto_add_msg_idx.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 - -""" -Protobuf processing to generate message ID maps for C++, Python, and JS -""" - -import sys -import re - -print("args", sys.argv) - -pf_in = open(sys.argv[1], "r") -pf_out = open(sys.argv[2], "a") - -while True: - line = pf_in.readline() - if len(line) == 0: - sys.exit(-1) - parts = re.split(r"\W+", line.strip()) - # print( line, parts ) - try: - idx = parts.index("ID") - # print( "ID:", parts[idx+1] ) - msg_type = int(parts[idx + 1]) << 8 - break - except BaseException: - pass - -# msg_type = 0 - -by_type = [] -idx = 0 - -pf_out.write("\n_msg_name_to_type = {\n") - -while True: - line = pf_in.readline() - if len(line) == 0: - break - - if line.startswith("message "): - msg_name = line.split()[1] - by_type.append(msg_name) - # print( msg_name, msg_type ) - if idx > 0: - pf_out.write(",\n") - pf_out.write(" '{}' : {}".format(msg_name, msg_type | idx)) - idx += 1 - -pf_out.write("\n}\n\n_msg_type_to_name = {\n") - -idx = 0 -for name in by_type: - if idx > 0: - pf_out.write(",\n") - pf_out.write(" {} : '{}'".format(msg_type | idx, name)) - idx += 1 - -pf_out.write("\n}\n") - -sys.exit(0) diff --git a/repository/gridftp/globus5/authz/source/AuthzWorker.cpp b/repository/gridftp/globus5/authz/source/AuthzWorker.cpp index 234c1e166..9050bb45b 100644 --- a/repository/gridftp/globus5/authz/source/AuthzWorker.cpp +++ b/repository/gridftp/globus5/authz/source/AuthzWorker.cpp @@ -14,11 +14,8 @@ #include "common/TraceException.hpp" #include "common/Util.hpp" -// Protobuf includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" -#include "common/Version.pb.h" +// Proto files +#include "common/envelope.pb.h" // Standard includes #include @@ -28,9 +25,8 @@ #include #include +using namespace SDMS; using namespace std; -using namespace SDMS::Anon; -using 
namespace SDMS::Auth; namespace { @@ -507,7 +503,7 @@ int AuthzWorker::processResponse(ICommunicator::Response &response) { auto payload = std::get(response.message->getPayload()); - Anon::NackReply *nack = dynamic_cast(payload); + NackReply *nack = dynamic_cast(payload); if (!nack) { return 0; } else { @@ -581,7 +577,7 @@ int AuthzWorker::checkAuth(char *client_id, char *path, char *action) { return 0; } - auto auth_req = std::make_unique(); + auto auth_req = std::make_unique(); auth_req->set_repo(m_config->repo_id); auth_req->set_client(client_id); @@ -619,19 +615,19 @@ const char *getVersion() { const char *getAPIVersion() { static std::string ver_str = - std::to_string(DATAFED_COMMON_PROTOCOL_API_MAJOR) + "." + - std::to_string(DATAFED_COMMON_PROTOCOL_API_MINOR) + "." + - std::to_string(DATAFED_COMMON_PROTOCOL_API_PATCH); + std::to_string(protocol::version::MAJOR) + "." + + std::to_string(protocol::version::MINOR) + "." + + std::to_string(protocol::version::PATCH); return ver_str.c_str(); } const char *getReleaseVersion() { - static std::string ver_str = std::to_string(DATAFED_RELEASE_YEAR) + "." + - std::to_string(DATAFED_RELEASE_MONTH) + "." + - std::to_string(DATAFED_RELEASE_DAY) + "." + - std::to_string(DATAFED_RELEASE_HOUR) + "." + - std::to_string(DATAFED_RELEASE_MINUTE); + static std::string ver_str = std::to_string(release::YEAR) + "." + + std::to_string(release::MONTH) + "." + + std::to_string(release::DAY) + "." + + std::to_string(release::HOUR) + "." 
+ + std::to_string(release::MINUTE); return ver_str.c_str(); } diff --git a/repository/gridftp/globus5/authz/source/Version.hpp.in b/repository/gridftp/globus5/authz/source/Version.hpp.in index cdd5e35e0..f698e5b7a 100644 --- a/repository/gridftp/globus5/authz/source/Version.hpp.in +++ b/repository/gridftp/globus5/authz/source/Version.hpp.in @@ -10,6 +10,23 @@ namespace SDMS { constexpr int PATCH = @DATAFED_AUTHZ_PATCH@; } } + + namespace protocol { + namespace version { + constexpr int MAJOR = @DATAFED_COMMON_PROTOCOL_API_MAJOR@; + constexpr int MINOR = @DATAFED_COMMON_PROTOCOL_API_MINOR@; + constexpr int PATCH = @DATAFED_COMMON_PROTOCOL_API_PATCH@; + } + } + + namespace release { + constexpr int YEAR = @DATAFED_RELEASE_YEAR@; + constexpr int MONTH = @DATAFED_RELEASE_MONTH@; + constexpr int DAY = @DATAFED_RELEASE_DAY@; + constexpr int HOUR = @DATAFED_RELEASE_HOUR@; + constexpr int MINUTE = @DATAFED_RELEASE_MINUTE@; + } + } #endif // AUTHZ_VERSION_HPP diff --git a/repository/gridftp/globus5/authz/tests/unit/test_AuthzWorker.cpp b/repository/gridftp/globus5/authz/tests/unit/test_AuthzWorker.cpp index ba51d8d08..5b03f8466 100644 --- a/repository/gridftp/globus5/authz/tests/unit/test_AuthzWorker.cpp +++ b/repository/gridftp/globus5/authz/tests/unit/test_AuthzWorker.cpp @@ -15,7 +15,7 @@ #include "common/ICommunicator.hpp" #include "common/IMessage.hpp" #include "common/MessageFactory.hpp" -#include "common/SDMS_Anon.pb.h" +#include "common/envelope.pb.h" #include "common/TraceException.hpp" extern "C" { @@ -397,7 +397,7 @@ BOOST_AUTO_TEST_CASE(ProcessResponseWithValidMessage) { SDMS::MessageState::REQUEST); response.message->set(SDMS::constants::message::google::CONTEXT, context); auto auth_by_token_req = - std::make_unique(); + std::make_unique(); std::string token = "golden_chest"; auth_by_token_req->set_token(token); @@ -429,7 +429,7 @@ BOOST_AUTO_TEST_CASE(ProcessResponseWithNackReply) { response.message->set(SDMS::MessageAttribute::STATE, 
SDMS::MessageState::REQUEST); response.message->set(SDMS::constants::message::google::CONTEXT, context); - auto nack = std::make_unique(); + auto nack = std::make_unique(); response.message->setPayload(std::move(nack)); diff --git a/repository/server/Config.hpp b/repository/server/Config.hpp index 887d47996..2c954692e 100644 --- a/repository/server/Config.hpp +++ b/repository/server/Config.hpp @@ -5,11 +5,9 @@ // Common public includes #include "common/ICredentials.hpp" -// Proto includes -#include "common/SDMS.pb.h" - // Standard includes #include +#include #include #include diff --git a/repository/server/RepoServer.cpp b/repository/server/RepoServer.cpp index 36b33dbbf..915673168 100644 --- a/repository/server/RepoServer.cpp +++ b/repository/server/RepoServer.cpp @@ -15,10 +15,7 @@ #include "common/Util.hpp" // Proto includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" -#include "common/Version.pb.h" +#include "common/envelope.pb.h" // Standard includes #include @@ -33,8 +30,6 @@ ((_T1.tv_sec - _T0.tv_sec) + ((_T1.tv_nsec - _T0.tv_nsec) / 1.0e9)) using namespace std; -using namespace SDMS::Anon; -using namespace SDMS::Auth; namespace { std::string randomAlphaNumericCode() { @@ -212,22 +207,22 @@ void Server::checkServerVersion() { << ver_reply->api_patch() << ")"); } bool new_release_available = false; - if (ver_reply->release_year() > Version::DATAFED_RELEASE_YEAR) { + if (ver_reply->release_year() > release::YEAR) { new_release_available = true; - } else if (ver_reply->release_year() == Version::DATAFED_RELEASE_YEAR) { - if (ver_reply->release_month() > Version::DATAFED_RELEASE_MONTH) { + } else if (ver_reply->release_year() == release::YEAR) { + if (ver_reply->release_month() > release::MONTH) { new_release_available = true; } else if (ver_reply->release_month() == - Version::DATAFED_RELEASE_MONTH) { - if (ver_reply->release_day() > Version::DATAFED_RELEASE_DAY) { + release::MONTH) { + if 
(ver_reply->release_day() > release::DAY) { new_release_available = true; - } else if (ver_reply->release_day() == Version::DATAFED_RELEASE_DAY) { - if (ver_reply->release_hour() > Version::DATAFED_RELEASE_HOUR) { + } else if (ver_reply->release_day() == release::DAY) { + if (ver_reply->release_hour() > release::HOUR) { new_release_available = true; } else if (ver_reply->release_hour() == - Version::DATAFED_RELEASE_HOUR) { + release::HOUR) { if (ver_reply->release_minute() > - Version::DATAFED_RELEASE_MINUTE) { + release::MINUTE) { new_release_available = true; } } diff --git a/repository/server/RequestWorker.cpp b/repository/server/RequestWorker.cpp index 49bb9186e..8e3c00772 100644 --- a/repository/server/RequestWorker.cpp +++ b/repository/server/RequestWorker.cpp @@ -12,11 +12,8 @@ #include "common/TraceException.hpp" #include "common/Util.hpp" -// Proto includes -#include "common/SDMS.pb.h" -#include "common/SDMS_Anon.pb.h" -#include "common/SDMS_Auth.pb.h" -#include "common/Version.pb.h" +// Proto files +#include "common/envelope.pb.h" // Third party includes #include @@ -29,9 +26,6 @@ using namespace std; namespace SDMS { -using namespace SDMS::Anon; -using namespace SDMS::Auth; - namespace Repo { map RequestWorker::m_msg_handlers; @@ -180,9 +174,14 @@ void RequestWorker::wait() { } } -#define SET_MSG_HANDLER(proto_id, msg, func) \ - m_msg_handlers[m_msg_mapper->getMessageType(proto_id, #msg)] = func +#define SET_MSG_HANDLER(msg, func) \ + m_msg_handlers[m_msg_mapper->getMessageType(#msg)] = func +/** + * This method configures message handling by creating a map from message type + * (envelope field number) to handler function. Message types are identified + * by their field number in the Envelope proto message. 
+ */ void RequestWorker::setupMsgHandlers() { static std::atomic_flag lock = ATOMIC_FLAG_INIT; @@ -190,22 +189,17 @@ void RequestWorker::setupMsgHandlers() { return; try { + // Anonymous interface handlers + SET_MSG_HANDLER(VersionRequest, &RequestWorker::procVersionRequest); - uint8_t proto_id = - m_msg_mapper->getProtocolID(MessageProtocol::GOOGLE_ANONONYMOUS); - - SET_MSG_HANDLER(proto_id, VersionRequest, - &RequestWorker::procVersionRequest); - - proto_id = m_msg_mapper->getProtocolID(MessageProtocol::GOOGLE_AUTHORIZED); - - SET_MSG_HANDLER(proto_id, RepoDataDeleteRequest, + // Authenticated interface handlers + SET_MSG_HANDLER(RepoDataDeleteRequest, &RequestWorker::procDataDeleteRequest); - SET_MSG_HANDLER(proto_id, RepoDataGetSizeRequest, + SET_MSG_HANDLER(RepoDataGetSizeRequest, &RequestWorker::procDataGetSizeRequest); - SET_MSG_HANDLER(proto_id, RepoPathCreateRequest, + SET_MSG_HANDLER(RepoPathCreateRequest, &RequestWorker::procPathCreateRequest); - SET_MSG_HANDLER(proto_id, RepoPathDeleteRequest, + SET_MSG_HANDLER(RepoPathDeleteRequest, &RequestWorker::procPathDeleteRequest); } catch (TraceException &e) { DL_ERROR(m_log_context, @@ -252,6 +246,8 @@ void RequestWorker::workerThread(LogContext log_context) { timeout_on_poll); }(repo_thread_id); + ProtoBufMap proto_map; + DL_TRACE(log_context, "Listening on address " << client->address()); while (m_run) { @@ -280,7 +276,8 @@ void RequestWorker::workerThread(LogContext log_context) { uint16_t msg_type = std::get( message.get(constants::message::google::MSG_TYPE)); - DL_TRACE(message_log_context, "Received msg of type: " << msg_type); + DL_TRACE(message_log_context, "Received msg of type: " + << proto_map.toString(msg_type)); if (m_msg_handlers.count(msg_type)) { map::iterator handler = @@ -348,7 +345,7 @@ void RequestWorker::workerThread(LogContext log_context) { DL_ERROR(message_log_context, "Error: " << e.what()); \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = 
std::make_unique(); \ - nack->set_err_code(ID_INTERNAL_ERROR); \ + nack->set_err_code(INTERNAL_ERROR); \ nack->set_err_msg(e.what()); \ msg_reply->setPayload(std::move(nack)); \ return msg_reply; \ @@ -358,7 +355,7 @@ void RequestWorker::workerThread(LogContext log_context) { "Error unkown exception while processing message!"); \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = std::make_unique(); \ - nack->set_err_code(ID_INTERNAL_ERROR); \ + nack->set_err_code(INTERNAL_ERROR); \ nack->set_err_msg("Unknown exception type"); \ msg_reply->setPayload(std::move(nack)); \ return msg_reply; \ @@ -373,7 +370,7 @@ void RequestWorker::workerThread(LogContext log_context) { "Message parse failed (malformed or unregistered msg type)."); \ auto msg_reply = m_msg_factory.createResponseEnvelope(*msg_request); \ auto nack = std::make_unique(); \ - nack->set_err_code(ID_BAD_REQUEST); \ + nack->set_err_code(BAD_REQUEST); \ nack->set_err_msg( \ "Message parse failed (malformed or unregistered msg type)"); \ msg_reply->setPayload(std::move(nack)); \ @@ -388,15 +385,15 @@ RequestWorker::procVersionRequest(std::unique_ptr &&msg_request) { DL_DEBUG(message_log_context, "Version request."); - reply.set_release_year(Version::DATAFED_RELEASE_YEAR); - reply.set_release_month(Version::DATAFED_RELEASE_MONTH); - reply.set_release_day(Version::DATAFED_RELEASE_DAY); - reply.set_release_hour(Version::DATAFED_RELEASE_HOUR); - reply.set_release_minute(Version::DATAFED_RELEASE_MINUTE); + reply.set_release_year(SDMS::release::YEAR); + reply.set_release_month(SDMS::release::MONTH); + reply.set_release_day(SDMS::release::DAY); + reply.set_release_hour(SDMS::release::HOUR); + reply.set_release_minute(SDMS::release::MINUTE); - reply.set_api_major(Version::DATAFED_COMMON_PROTOCOL_API_MAJOR); - reply.set_api_minor(Version::DATAFED_COMMON_PROTOCOL_API_MINOR); - reply.set_api_patch(Version::DATAFED_COMMON_PROTOCOL_API_PATCH); + 
reply.set_api_major(SDMS::protocol::version::MAJOR); + reply.set_api_minor(SDMS::protocol::version::MINOR); + reply.set_api_patch(SDMS::protocol::version::PATCH); reply.set_component_major(SDMS::repository::version::MAJOR); reply.set_component_minor(SDMS::repository::version::MINOR); @@ -407,7 +404,7 @@ RequestWorker::procVersionRequest(std::unique_ptr &&msg_request) { std::unique_ptr RequestWorker::procDataDeleteRequest(std::unique_ptr &&msg_request) { - PROC_MSG_BEGIN(Auth::RepoDataDeleteRequest, Anon::AckReply) + PROC_MSG_BEGIN(RepoDataDeleteRequest, AckReply) if (request->loc_size()) { @@ -427,7 +424,7 @@ RequestWorker::procDataDeleteRequest(std::unique_ptr &&msg_request) { std::unique_ptr RequestWorker::procDataGetSizeRequest(std::unique_ptr &&msg_request) { - PROC_MSG_BEGIN(Auth::RepoDataGetSizeRequest, Auth::RepoDataSizeReply) + PROC_MSG_BEGIN(RepoDataGetSizeRequest, RepoDataSizeReply) DL_DEBUG(message_log_context, "Data get size."); @@ -462,7 +459,7 @@ RequestWorker::procDataGetSizeRequest(std::unique_ptr &&msg_request) { std::unique_ptr RequestWorker::procPathCreateRequest(std::unique_ptr &&msg_request) { - PROC_MSG_BEGIN(Auth::RepoPathCreateRequest, Anon::AckReply) + PROC_MSG_BEGIN(RepoPathCreateRequest, AckReply) std::string local_path = createSanitizedPath(request->path()); @@ -480,7 +477,7 @@ RequestWorker::procPathCreateRequest(std::unique_ptr &&msg_request) { std::unique_ptr RequestWorker::procPathDeleteRequest(std::unique_ptr &&msg_request) { - PROC_MSG_BEGIN(Auth::RepoPathDeleteRequest, Anon::AckReply) + PROC_MSG_BEGIN(RepoPathDeleteRequest, AckReply) DL_DEBUG(message_log_context, "Relative path delete request: " << request->path()); diff --git a/repository/server/Version.hpp.in b/repository/server/Version.hpp.in index 322bb4c5c..617a414e9 100644 --- a/repository/server/Version.hpp.in +++ b/repository/server/Version.hpp.in @@ -10,6 +10,22 @@ namespace SDMS { constexpr int PATCH = @DATAFED_REPO_PATCH@; } } + + namespace protocol { + namespace 
version { + constexpr int MAJOR = @DATAFED_COMMON_PROTOCOL_API_MAJOR@; + constexpr int MINOR = @DATAFED_COMMON_PROTOCOL_API_MINOR@; + constexpr int PATCH = @DATAFED_COMMON_PROTOCOL_API_PATCH@; + } + } + + namespace release { + constexpr int YEAR = @DATAFED_RELEASE_YEAR@; + constexpr int MONTH = @DATAFED_RELEASE_MONTH@; + constexpr int DAY = @DATAFED_RELEASE_DAY@; + constexpr int HOUR = @DATAFED_RELEASE_HOUR@; + constexpr int MINUTE = @DATAFED_RELEASE_MINUTE@; + } } #endif // REPO_VERSION_HPP diff --git a/repository/server/main.cpp b/repository/server/main.cpp index a5c00d1f0..0e5d2e287 100644 --- a/repository/server/main.cpp +++ b/repository/server/main.cpp @@ -9,9 +9,6 @@ #include "common/TraceException.hpp" #include "common/Util.hpp" -// Protocol includes -#include "common/Version.pb.h" - // Third party includes #include @@ -78,13 +75,13 @@ int main(int a_argc, char **a_argv) { } if (opt_map.count("version")) { - cout << "Release Version: " << DATAFED_RELEASE_YEAR << "." - << DATAFED_RELEASE_MONTH << "." << DATAFED_RELEASE_DAY << "." - << DATAFED_RELEASE_HOUR << "." << DATAFED_RELEASE_MINUTE + cout << "Release Version: " << release::YEAR << "." + << release::MONTH << "." << release::DAY << "." + << release::HOUR << "." << release::MINUTE << std::endl; - cout << "Messaging API: " << DATAFED_COMMON_PROTOCOL_API_MAJOR << "." - << DATAFED_COMMON_PROTOCOL_API_MINOR << "." - << DATAFED_COMMON_PROTOCOL_API_PATCH << endl; + cout << "Messaging API: " << protocol::version::MAJOR << "." + << protocol::version::MINOR << "." + << protocol::version::PATCH << endl; cout << "Repo Server: " << repository::version::MAJOR << "." << repository::version::MINOR << "." 
<< repository::version::PATCH << endl; diff --git a/web/datafed-ws.js b/web/datafed-ws.js index d4373c707..b52b23c31 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -15,7 +15,7 @@ if (process.argv.length != 3) { throw "Invalid arguments, usage: datafed-ws config-file"; } -import web_version from "./version.js"; +import version from "./version.js"; import express from "express"; // For REST api import session from "express-session"; import sanitizeHtml from "sanitize-html"; @@ -55,6 +55,7 @@ var g_host, g_test, g_msg_by_id = {}, g_msg_by_name = {}, + g_envelope_type, g_core_sock = zmq.socket("dealer"), g_core_serv_addr, g_globus_auth, @@ -64,7 +65,7 @@ var g_host, g_ctx_next = 0, g_client_id, g_client_secret, - g_ready_start = 4, + g_ready_start = 3, g_version, g_ver_release_year, g_ver_release_month, @@ -145,6 +146,25 @@ class Logger { const logger = new Logger(LogLevel.INFO); +g_ver_release_year = version.DATAFED_RELEASE_YEAR; +g_ver_release_month = version.DATAFED_RELEASE_MONTH; +g_ver_release_day = version.DATAFED_RELEASE_DAY; +g_ver_release_hour = version.DATAFED_RELEASE_HOUR; +g_ver_release_minute = version.DATAFED_RELEASE_MINUTE; + +g_version = + g_ver_release_year + + "." + + g_ver_release_month + + "." + + g_ver_release_day + + "." + + g_ver_release_hour + + "." 
+ + g_ver_release_minute; + +if (--g_ready_start == 0) startServer(); + function getCurrentLineNumber() { const stackTrace = new Error().stack; const lineMatches = stackTrace.match(/:\d+:\d+/g); @@ -1108,23 +1128,6 @@ app.get("/api/dat/lock", (a_req, a_resp) => { ); }); -app.get("/api/dat/lock/toggle", (a_req, a_resp) => { - sendMessage("RecordLockToggleRequest", { id: a_req.query.id }, a_req, a_resp, function (reply) { - a_resp.send(reply); - }); -}); - -app.get("/api/dat/copy", (a_req, a_resp) => { - var params = { - sourceId: a_req.query.src, - destId: a_req.query.dst, - }; - - sendMessage("DataCopyRequest", params, a_req, a_resp, function (reply) { - a_resp.send(reply); - }); -}); - app.get("/api/dat/delete", (a_req, a_resp) => { sendMessage( "RecordDeleteRequest", @@ -1211,18 +1214,6 @@ app.get("/api/dat/put", (a_req, a_resp) => { }); }); -app.get("/api/dat/dep/get", (a_req, a_resp) => { - sendMessage( - "RecordGetDependenciesRequest", - { id: a_req.query.ids }, - a_req, - a_resp, - function (reply) { - a_resp.send(reply); - }, - ); -}); - app.get("/api/dat/dep/graph/get", (a_req, a_resp) => { sendMessage( "RecordGetDependencyGraphRequest", @@ -1569,12 +1560,6 @@ app.get("/api/col/published/list", (a_req, a_resp) => { }); }); -app.post("/api/cat/search", (a_req, a_resp) => { - sendMessage("CatalogSearchRequest", a_req.body, a_req, a_resp, function (reply) { - a_resp.send(reply); - }); -}); - app.get("/api/globus/consent_url", storeCollectionId, (a_req, a_resp) => { const { requested_scopes, state, refresh_tokens, query_params } = a_req.query; @@ -1590,12 +1575,6 @@ app.get("/api/globus/consent_url", storeCollectionId, (a_req, a_resp) => { a_resp.json({ consent_url }); }); -app.post("/api/col/pub/search/data", (a_req, a_resp) => { - sendMessage("RecordSearchPublishedRequest", a_req.body, a_req, a_resp, function (reply) { - a_resp.send(reply); - }); -}); - app.get("/api/repo/list", (a_req, a_resp) => { var params = {}; if (a_req.query.all) params.all = 
a_req.query.all; @@ -1772,18 +1751,6 @@ app.get("/api/top/list/topics", (a_req, a_resp) => { }); }); -app.get("/api/top/list/coll", (a_req, a_resp) => { - var par = { topicId: a_req.query.id }; - if (a_req.query.offset != undefined && a_req.query.count != undefined) { - par.offset = a_req.query.offset; - par.count = a_req.query.count; - } - - sendMessage("TopicListCollectionsRequest", par, a_req, a_resp, function (reply) { - a_resp.json(reply); - }); -}); - app.get("/api/top/view", (a_req, a_resp) => { sendMessage("TopicViewRequest", { id: a_req.query.id }, a_req, a_resp, function (reply) { a_resp.json(reply); @@ -2065,15 +2032,17 @@ function sendMessage(a_msg_name, a_msg_data, a_req, a_resp, a_cb, a_anon) { a_resp.setHeader("Content-Type", "application/json"); allocRequestContext(a_resp, function (ctx) { - var msg = g_msg_by_name[a_msg_name]; - if (!msg) throw "Invalid message type: " + a_msg_name; + var msg_info = g_msg_by_name[a_msg_name]; + if (!msg_info) throw "Invalid message type: " + a_msg_name; - var msg_buf = msg.encode(a_msg_data).finish(); + // Wrap inner message data in an Envelope (matches C++ sendBody wrapInEnvelope) + var envelope_data = {}; + envelope_data[msg_info.field_name] = a_msg_data; + var msg_buf = g_envelope_type.encode(envelope_data).finish(); var frame = Buffer.alloc(8); frame.writeUInt32BE(msg_buf.length, 0); - frame.writeUInt8(msg._pid, 4); - frame.writeUInt8(msg._mid, 5); + frame.writeUInt16BE(msg_info.field_id, 4); frame.writeUInt16BE(ctx, 6); g_ctx[ctx] = function (a_reply) { @@ -2121,7 +2090,10 @@ function sendMessage(a_msg_name, a_msg_data, a_req, a_resp, a_cb, a_anon) { sendMessage.name, getCurrentLineNumber(), "MsgType is: " + - msg._msg_type + + msg_info.field_id + + " (" + + a_msg_name + + ")" + " Writing ctx to frame, " + ctx + " buffer size " + @@ -2142,7 +2114,10 @@ function sendMessage(a_msg_name, a_msg_data, a_req, a_resp, a_cb, a_anon) { sendMessage.name, getCurrentLineNumber(), "MsgType is: " + - msg._msg_type + + 
msg_info.field_id + + " (" + + a_msg_name + + ")" + " Writing ctx to frame, " + ctx + " buffer size " + @@ -2154,17 +2129,19 @@ function sendMessage(a_msg_name, a_msg_data, a_req, a_resp, a_cb, a_anon) { } function sendMessageDirect(a_msg_name, a_client, a_msg_data, a_cb) { - var msg = g_msg_by_name[a_msg_name]; - if (!msg) throw "Invalid message type: " + a_msg_name; + var msg_info = g_msg_by_name[a_msg_name]; + if (!msg_info) throw "Invalid message type: " + a_msg_name; allocRequestContext(null, function (ctx) { - var msg_buf = msg.encode(a_msg_data).finish(); + // Wrap inner message data in an Envelope (matches C++ sendBody wrapInEnvelope) + var envelope_data = {}; + envelope_data[msg_info.field_name] = a_msg_data; + var msg_buf = g_envelope_type.encode(envelope_data).finish(); var frame = Buffer.alloc(8); // A protobuf message doesn't have to have a payload frame.writeUInt32BE(msg_buf.length, 0); - frame.writeUInt8(msg._pid, 4); - frame.writeUInt8(msg._mid, 5); + frame.writeUInt16BE(msg_info.field_id, 4); frame.writeUInt16BE(ctx, 6); g_ctx[ctx] = a_cb; @@ -2187,7 +2164,10 @@ function sendMessageDirect(a_msg_name, a_client, a_msg_data, a_cb) { sendMessageDirect.name, getCurrentLineNumber(), "MsgType is: " + - msg._msg_type + + msg_info.field_id + + " (" + + a_msg_name + + ")" + " Direct Writing ctx to frame, " + ctx + " buffer size " + @@ -2208,7 +2188,10 @@ function sendMessageDirect(a_msg_name, a_client, a_msg_data, a_cb) { sendMessageDirect.name, getCurrentLineNumber(), "MsgType is: " + - msg._msg_type + + msg_info.field_id + + " (" + + a_msg_name + + ")" + " Direct Writing ctx to frame, " + ctx + " buffer size " + @@ -2219,71 +2202,65 @@ function sendMessageDirect(a_msg_name, a_client, a_msg_data, a_cb) { }); } -function processProtoFile(msg) { - //var mlist = msg.parent.order; - var i, - msg_list = []; - for (i in msg.parent.nested) msg_list.push(msg.parent.nested[i]); - - //msg_list.sort(); - - var pid = msg.values.ID; +/** + * Processes the proto3 
Envelope message to build message type maps. + * + * Instead of the old proto2 approach that derived message types from Protocol enum IDs + * and file ordering (pid << 8 | mid), this uses the envelope's oneof field numbers + * as stable message type identifiers. + * + * Each map entry stores: + * - type: the protobufjs Type (for encode/decode of the inner message) + * - field_name: the envelope oneof field name (e.g. "version_request") + * - field_id: the envelope field number (used as msg_type in the frame) + * + * @param {protobuf.Root} root - The loaded protobuf root containing SDMS.Envelope + */ +function processEnvelope(root) { + g_envelope_type = root.lookupType("SDMS.Envelope"); + var payloadOneof = g_envelope_type.oneofs.payload; - for (i = 1; i < msg_list.length; i++) { - msg = msg_list[i]; - msg._pid = pid; - msg._mid = i - 1; - msg._msg_type = (pid << 8) | (i - 1); + if (!payloadOneof) throw "Missing 'payload' oneof in SDMS.Envelope"; - g_msg_by_id[msg._msg_type] = msg; - g_msg_by_name[msg.name] = msg; - } -} + payloadOneof.fieldsArray.forEach(function (field) { + var msgType = field.resolvedType; + if (!msgType) { + logger.warning( + processEnvelope.name, + getCurrentLineNumber(), + "Unresolved type for envelope field: " + field.name, + ); + return; + } -protobuf.load("Version.proto", function (err, root) { - if (err) throw err; + var entry = { + type: msgType, // protobufjs Type for encode/decode + field_name: field.name, // envelope oneof field name + field_id: field.id, // envelope field number = msg_type in frame + }; - var msg = root.lookupEnum("Version"); - if (!msg) throw "Missing Version enum in Version.Anon proto file"; - - g_ver_release_year = msg.values.DATAFED_RELEASE_YEAR; - g_ver_release_month = msg.values.DATAFED_RELEASE_MONTH; - g_ver_release_day = msg.values.DATAFED_RELEASE_DAY; - g_ver_release_hour = msg.values.DATAFED_RELEASE_HOUR; - g_ver_release_minute = msg.values.DATAFED_RELEASE_MINUTE; - - g_version = - g_ver_release_year + - "." 
+ - g_ver_release_month + - "." + - g_ver_release_day + - "." + - g_ver_release_hour + - "." + - g_ver_release_minute; - - logger.info("protobuf.load", getCurrentLineNumber(), "Running Version: " + g_version); - if (--g_ready_start == 0) startServer(); -}); + g_msg_by_id[field.id] = entry; + g_msg_by_name[msgType.name] = entry; + }); -protobuf.load("SDMS_Anon.proto", function (err, root) { - if (err) throw err; + logger.info( + processEnvelope.name, + getCurrentLineNumber(), + "Loaded " + Object.keys(g_msg_by_id).length + " message types from envelope", + ); +} - var msg = root.lookupEnum("SDMS.Anon.Protocol"); - if (!msg) throw "Missing Protocol enum in SDMS.Anon proto file"; +var protobufRoot = new protobuf.Root(); - processProtoFile(msg); - if (--g_ready_start == 0) startServer(); -}); +protobufRoot.resolvePath = function (origin, target) { + return "proto3/" + target; +}; -protobuf.load("SDMS_Auth.proto", function (err, root) { +protobufRoot.load("envelope.proto", function (err, root) { if (err) throw err; - var msg = root.lookupEnum("SDMS.Auth.Protocol"); - if (!msg) throw "Missing Protocol enum in SDMS.Auth proto file"; - - processProtoFile(msg); + root.resolveAll(); + processEnvelope(root); if (--g_ready_start == 0) startServer(); }); @@ -2304,20 +2281,34 @@ g_core_sock.on( var mtype = (frame.readUInt8(4) << 8) | frame.readUInt8(5); var ctx = frame.readUInt16BE(6); - var msg_class = g_msg_by_id[mtype]; + var msg_info = g_msg_by_id[mtype]; var msg; + var msg_name = msg_info ? 
msg_info.type.name : "unknown(" + mtype + ")"; - if (msg_class) { + if (msg_info) { // Only try to decode if there is a payload if (msg_buf && msg_buf.length) { try { - // This is unserializing the protocol message - msg = msg_class.decode(msg_buf); + // Decode as Envelope (matches C++ receiveBody unwrapFromEnvelope) + var envelope = g_envelope_type.decode(msg_buf); + var which_field = envelope.payload; // oneof discriminator: field name that is set + if (which_field) { + msg = envelope[which_field]; + msg_name = which_field; + } else { + logger.warning( + "g_core_sock.on", + getCurrentLineNumber(), + "Envelope decoded but no payload field set, correlation_id: " + + correlation_id, + ); + msg = msg_info.type.create({}); + } if (!msg) { logger.error( "g_core_sock.on", getCurrentLineNumber(), - "ERROR: msg decode failed: no reason, correlation_id: " + + "ERROR: envelope decode produced null msg, correlation_id: " + correlation_id, ); } @@ -2325,11 +2316,15 @@ g_core_sock.on( logger.error( "g_core_sock.on", getCurrentLineNumber(), - "ERROR: msg decode failed: " + err + " correlation_id: " + correlation_id, + "ERROR: envelope decode failed: " + + err + + " correlation_id: " + + correlation_id, ); } } else { - msg = msg_class; + // No payload body - create empty message instance + msg = msg_info.type.create({}); } } else { logger.error( @@ -2345,7 +2340,7 @@ g_core_sock.on( logger.info( "g_core_sock.on", getCurrentLineNumber(), - "freed ctx: " + ctx + " for msg: " + msg_class.name, + "freed ctx: " + ctx + " for msg: " + msg_name, correlation_id, ); g_ctx_next = ctx; @@ -2360,7 +2355,7 @@ g_core_sock.on( " - msg type: " + mtype + ", name: " + - msg_class.name + + msg_name + " correlation_id: " + correlation_id, ); diff --git a/web/docker/Dockerfile b/web/docker/Dockerfile index 5fafb6b08..15cbbc2b1 100644 --- a/web/docker/Dockerfile +++ b/web/docker/Dockerfile @@ -39,7 +39,7 @@ COPY ./scripts/generate_ws_config.sh ${BUILD_DIR}/scripts/ COPY ./scripts/install_ws.sh 
${BUILD_DIR}/scripts/ COPY ./scripts/export_dependency_version.sh ${BUILD_DIR}/scripts/ COPY ./cmake ${BUILD_DIR}/cmake -COPY ./common/proto ${BUILD_DIR}/common/proto +COPY ./common/proto3 ${BUILD_DIR}/common/proto3 COPY ./web ${BUILD_DIR}/web RUN ${DATAFED_DEPENDENCIES_ROOT}/scripts/generate_dependencies_config.sh && \ @@ -83,6 +83,7 @@ WORKDIR ${DATAFED_DIR} USER datafed +RUN mkdir -p ${DATAFED_DEFAULT_LOG_PATH}; chown root:datafed ${DATAFED_DEFAULT_LOG_PATH}; chmod -R g+rw ${DATAFED_DEFAULT_LOG_PATH} COPY --from=ws-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts/ {DATAFED_DEPENDENCIES_ROOT}/scripts/ COPY --chown=datafed:root ./scripts/generate_datafed.sh ${BUILD_DIR}/scripts/generate_datafed.sh @@ -97,7 +98,7 @@ COPY --from=ws-build --chown=datafed:root ${DATAFED_DEPENDENCIES_ROOT}/scripts $ COPY --from=ws-build --chown=datafed:root ${DATAFED_INSTALL_PATH}/web ${DATAFED_INSTALL_PATH}/web COPY --from=ws-build --chown=datafed:root /usr/bin/curl /usr/bin/curl -WORKDIR ${BUILD_DIR} +WORKDIR ${BUILD_DIR}/web USER root diff --git a/web/static/api.js b/web/static/api.js index 3adf056f5..72af70138 100644 --- a/web/static/api.js +++ b/web/static/api.js @@ -184,17 +184,6 @@ export function dataPutCheck(a_id, a_cb) { _asyncGet("/api/dat/put?id=" + encodeURIComponent(a_id) + "&check=true", null, a_cb); } -export function dataGetDeps(a_ids, a_cb) { - _asyncGet("/api/dat/dep/get?ids=" + encodeURIComponent(a_ids), null, function (ok, data) { - if (ok) { - a_cb(data); - } else { - util.setStatusText("Get Dependencies Error: " + data, true); - a_cb(); - } - }); -} - export function dataGetDepGraph(a_id, a_cb) { _asyncGet("/api/dat/dep/graph/get?id=" + encodeURIComponent(a_id), null, function (ok, data) { if (ok) { @@ -240,25 +229,10 @@ export function sendDataDelete(a_ids, a_cb) { _asyncGet("/api/dat/delete?ids=" + encodeURIComponent(JSON.stringify(a_ids)), null, a_cb); } -export function copyData(a_src_id, a_dst_id, a_cb) { - _asyncGet( - "/api/dat/copy?src=" + 
- encodeURIComponent(a_src_id) + - "&dst=" + - encodeURIComponent(a_dst_id), - null, - a_cb, - ); -} - export function dataSearch(a_query, a_callback) { _asyncPost("/api/dat/search", a_query, a_callback); } -export function dataPubSearch(a_query, a_cb) { - _asyncPost("/api/col/pub/search/data", a_query, a_cb); -} - export function sendDataLock(a_ids, a_lock, a_cb) { _asyncGet( "/api/dat/lock?lock=" + a_lock + "&ids=" + encodeURIComponent(JSON.stringify(a_ids)), @@ -334,13 +308,6 @@ export function collDelete(a_ids, a_cb) { _asyncGet("/api/col/delete?ids=" + encodeURIComponent(JSON.stringify(a_ids)), null, a_cb); } -export function catalogSearch(a_query, a_cb) { - _asyncPost("/api/cat/search", a_query, a_cb); - /*_asyncPost( "/api/col/pub/search", a_query, function( ok, data ){ - setTimeout( function(){ a_cb( ok, data ); }, 2000 ); - });*/ -} - export function projList_url(a_owned, a_admin, a_member, a_sort, a_offset, a_count) { return ( "/api/prj/list?owner=" + @@ -852,25 +819,6 @@ export function topicListTopics(a_id, a_offset, a_count, a_cb) { if (!a_cb) return; _asyncGet(topicListTopics_url(a_id, a_offset, a_count), null, a_cb); - /*_asyncGet( topicListTopics_url( a_id, a_offset, a_count ), null, function( ok, data ){ - setTimeout( function(){ a_cb( ok, data ); }, 2000 ); - });*/ -} - -export function topicListColl_url(a_id, a_offset, a_count) { - return ( - "/api/top/list/coll?id=" + - a_id + - (a_offset != undefined && a_count != undefined - ? 
"&offset=" + a_offset + "&count=" + a_count - : "") - ); -} - -export function topicListColl(a_id, a_offset, a_count, a_cb) { - if (!a_cb) return; - - _asyncGet(topicListColl_url(a_id, a_offset, a_count), null, a_cb); } export function topicSearch_url(a_phrase) { diff --git a/web/version.js.in b/web/version.js.in index 69ec65aca..ca3eb3b97 100644 --- a/web/version.js.in +++ b/web/version.js.in @@ -2,5 +2,10 @@ const MAJOR = @DATAFED_WEB_MAJOR@; const MINOR = @DATAFED_WEB_MINOR@; const PATCH = @DATAFED_WEB_PATCH@; +const RELEASE_YEAR = @DATAFED_RELEASE_YEAR@; +const RELEASE_MONTH = @DATAFED_RELEASE_MONTH@; +const RELEASE_DAY = @DATAFED_RELEASE_DAY@; +const RELEASE_HOUR = @DATAFED_RELEASE_HOUR@; +const RELEASE_MINUTE = @DATAFED_RELEASE_MINUTE@; -export default { MAJOR, MINOR, PATCH }; +export default { MAJOR, MINOR, PATCH, RELEASE_YEAR, RELEASE_MONTH, RELEASE_DAY, RELEASE_HOUR, RELEASE_MINUTE } From b1a223973548ee86df9d94c24b5b56cc16b8c00e Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Wed, 18 Feb 2026 15:13:46 -0500 Subject: [PATCH 56/65] updated dependencies submodule --- external/DataFedDependencies | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/external/DataFedDependencies b/external/DataFedDependencies index 0e9a5bb75..e0319a2f2 160000 --- a/external/DataFedDependencies +++ b/external/DataFedDependencies @@ -1 +1 @@ -Subproject commit 0e9a5bb75558d09ac50ac632160fc8f08e5fc873 +Subproject commit e0319a2f2e70d901180c730ebd5b10b10d8fce93 From 7fd171f9d53464f1eea6211fd2e8a0ff297b48e1 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Wed, 18 Feb 2026 15:14:03 -0500 Subject: [PATCH 57/65] fixed gcs build job --- .gitlab/build/build_gcs_base_image.yml | 1 + .gitlab/build/force_build_gcs_base_image.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.gitlab/build/build_gcs_base_image.yml b/.gitlab/build/build_gcs_base_image.yml index a437f7502..70c071827 100644 --- a/.gitlab/build/build_gcs_base_image.yml +++ 
b/.gitlab/build/build_gcs_base_image.yml @@ -31,6 +31,7 @@ build-gcs-base: - echo "$BRANCH_LOWER" - source "scripts/dependency_versions.sh" - cd "external/globus-connect-server-deploy/docker" + - git fetch origin "$DATAFED_GCS_SUBMODULE_VERSION" - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" diff --git a/.gitlab/build/force_build_gcs_base_image.yml b/.gitlab/build/force_build_gcs_base_image.yml index 7e32fad39..c11b97573 100644 --- a/.gitlab/build/force_build_gcs_base_image.yml +++ b/.gitlab/build/force_build_gcs_base_image.yml @@ -18,6 +18,7 @@ build-gcs-base: - echo "$BRANCH_LOWER" - source "scripts/dependency_versions.sh" - cd "external/globus-connect-server-deploy/docker" + - git fetch origin "$DATAFED_GCS_SUBMODULE_VERSION" - git checkout "$DATAFED_GCS_SUBMODULE_VERSION" - docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" - docker build --no-cache --progress plain -t "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" - < "./docker-files/Dockerfile.${GCS_BASE_IMAGE_DISTRO}" From a851f0f51b81b96ebbe0a5803ce506a6fe2587a5 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Mon, 23 Feb 2026 03:35:27 -0500 Subject: [PATCH 58/65] fixed proto3 version conversion --- web/datafed-ws.js | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index d9205fad3..8421d2b43 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -146,11 +146,15 @@ class Logger { const logger = new Logger(LogLevel.INFO); -g_ver_release_year = version.DATAFED_RELEASE_YEAR; -g_ver_release_month = version.DATAFED_RELEASE_MONTH; -g_ver_release_day = version.DATAFED_RELEASE_DAY; -g_ver_release_hour = 
version.DATAFED_RELEASE_HOUR; -g_ver_release_minute = version.DATAFED_RELEASE_MINUTE; +g_ver_release_year = version.RELEASE_YEAR; +g_ver_release_month = version.RELEASE_MONTH; +g_ver_release_day = version.RELEASE_DAY; +g_ver_release_hour = version.RELEASE_HOUR; +g_ver_release_minute = version.RELEASE_MINUTE; + +g_ver_api_major = version.MAJOR; +g_ver_api_minor = version.MINOR; +g_ver_api_patch = version.PATCH; g_version = g_ver_release_year + From b4442b94310cca124a46ff9bb58b6570c673cf34 Mon Sep 17 00:00:00 2001 From: Blake Nedved Date: Mon, 23 Feb 2026 10:41:43 -0500 Subject: [PATCH 59/65] fixed missed merge conflict --- core/database/foxx/api/user_router.js | 4 ---- 1 file changed, 4 deletions(-) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 3f10df5be..c5cba9680 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -1161,12 +1161,8 @@ router router .get("/token/get/expiring", function (req, res) { -<<<<<<< HEAD let extra_log_info = []; const desc = `User access tokens expiring in ${req.queryParams.expires_in} seconds`; -======= - let result = null; ->>>>>>> devel try { logger.logRequestStarted({ client: req.queryParams.client, From 8e07d106be4879b2a0916de2b4dfa8facfd99301 Mon Sep 17 00:00:00 2001 From: nedvedba <145805866+nedvedba@users.noreply.github.com> Date: Mon, 23 Feb 2026 11:06:47 -0500 Subject: [PATCH 60/65] =?UTF-8?q?fix:=20prevent=20defaults=20being=20set?= =?UTF-8?q?=20to=20undefined,=20and=20interpret=20numbers=20a=E2=80=A6=20(?= =?UTF-8?q?#1861)=20(#1866)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: prevent defaults being set to undefined, and interpret numbers and enums as strings. 
* chore: Auto-format JavaScript files with Prettier Co-authored-by: Joshua S Brown --- web/datafed-ws.js | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 8421d2b43..9e05027a5 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -2458,6 +2458,25 @@ g_core_sock.on( correlation_id, ); g_ctx_next = ctx; + + // Convert protobufjs message to plain object with default values + if (msg) { + var resolve_type = msg_info ? msg_info.type : null; + if (which_field) { + var actual_entry = Object.values(g_msg_by_id).find( + (e) => e.field_name === which_field, + ); + if (actual_entry) resolve_type = actual_entry.type; + } + if (resolve_type) { + msg = resolve_type.toObject(msg, { + defaults: true, + longs: String, + enums: String, + }); + } + } + f(msg); } else { g_ctx[ctx] = null; From d41bc4690acaf40513a5f1caf5d7238b2daaa7e7 Mon Sep 17 00:00:00 2001 From: nedvedba <145805866+nedvedba@users.noreply.github.com> Date: Mon, 23 Feb 2026 11:50:06 -0500 Subject: [PATCH 61/65] Update Staging (#1869) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: prevent defaults being set to undefined, and interpret numbers a… (#1861) * fix: prevent defaults being set to undefined, and interpret numbers and enums as strings. * chore: Auto-format JavaScript files with Prettier * fix: version numbers from proto3 messages follow camel case. 
(#1868) --------- Co-authored-by: Joshua S Brown --- web/datafed-ws.js | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 9e05027a5..71783fd3c 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -216,46 +216,46 @@ function startServer() { "ERROR: No reply from core server", ); } else if ( - reply.api_major != g_ver_api_major || - reply.api_minor < g_ver_api_minor || - reply.api_minor > g_ver_api_minor + 9 + reply.apiMajor != g_ver_api_major || + reply.apiMinor < g_ver_api_minor || + reply.apiMinor > g_ver_api_minor + 9 ) { logger.error( startServer.name, getCurrentLineNumber(), "ERROR: Incompatible api version detected (" + - reply.api_major + + reply.apiMajor + "." + - reply.api_minor + + reply.apiMinor + "." + - reply.api_patch + + reply.apiPatch + ")", ); } else { var warning_msg = "WARNING: A newer web server may be available the latest release version is: (" + - reply.release_year + + reply.releaseYear + "." + - reply.release_month + + reply.releaseMonth + "." + - reply.release_day + + reply.releaseDay + "." + - reply.release_hour + + reply.releaseHour + "." 
+ - reply.release_minute; - if (reply.release_year > g_ver_release_year) { + reply.releaseMinute; + if (reply.releaseYear > g_ver_release_year) { logger.warning(startServer.name, getCurrentLineNumber(), warning_msg); - } else if (reply.release_year == g_ver_release_year) { - if (reply.release_month > g_ver_release_month) { + } else if (reply.releaseYear == g_ver_release_year) { + if (reply.releaseMonth > g_ver_release_month) { logger.warning(startServer.name, getCurrentLineNumber(), warning_msg); - } else if (reply.release_month == g_ver_release_month) { - if (reply.release_day > g_ver_release_day) { + } else if (reply.releaseMonth == g_ver_release_month) { + if (reply.releaseDay > g_ver_release_day) { logger.warning(startServer.name, getCurrentLineNumber(), warning_msg); - } else if (reply.release_day == g_ver_release_day) { - if (reply.release_hour > g_ver_release_hour) { + } else if (reply.releaseDay == g_ver_release_day) { + if (reply.releaseHour > g_ver_release_hour) { logger.warning(startServer.name, getCurrentLineNumber(), warning_msg); - } else if (reply.release_hour == g_ver_release_hour) { - if (reply.release_minute > g_ver_release_minute) { + } else if (reply.releaseHour == g_ver_release_hour) { + if (reply.releaseMinute > g_ver_release_minute) { logger.warning( startServer.name, getCurrentLineNumber(), From 0fae43edc8d9f2afc9b1348a916ed29b0ca89c4e Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Tue, 24 Feb 2026 08:40:31 -0500 Subject: [PATCH 62/65] [DAPS-1862] - fix allocation change failure (#1864) --- core/database/foxx/api/authz_router.js | 9 +- core/database/foxx/api/posix_path.js | 8 ++ core/database/foxx/api/record.js | 108 +++++++++++------------- core/database/foxx/api/repo.js | 34 ++++++++ core/database/foxx/api/tasks.js | 17 +++- core/database/foxx/tests/record.test.js | 2 +- web/datafed-ws.js | 6 +- 7 files changed, 119 insertions(+), 65 deletions(-) diff --git a/core/database/foxx/api/authz_router.js 
b/core/database/foxx/api/authz_router.js index 3597cecf0..6a099099d 100644 --- a/core/database/foxx/api/authz_router.js +++ b/core/database/foxx/api/authz_router.js @@ -45,7 +45,14 @@ router if (!client) { throw [error.ERR_PERM_DENIED, "Unknown client: " + req.queryParams.client]; } - let repo = new Repo(req.queryParams.repo); + let repo = Repo.resolveFromPath(req.queryParams.file); + + if (repo.id() !== req.queryParams.repo) { + throw [ + error.ERR_PERM_DENIED, + "File path does not match repository: " + req.queryParams.file, + ]; + } let path_type = repo.pathType(req.queryParams.file); // If the provided path is not within the repo throw an error diff --git a/core/database/foxx/api/posix_path.js b/core/database/foxx/api/posix_path.js index f171b1d8b..ae0f45cf0 100644 --- a/core/database/foxx/api/posix_path.js +++ b/core/database/foxx/api/posix_path.js @@ -1,5 +1,6 @@ "use strict"; +const error = require("./lib/error_codes"); const path = require("path"); module.exports = (function () { @@ -45,5 +46,12 @@ module.exports = (function () { return components.filter((component) => component !== ""); }; + obj.normalizePOSIXPath = function (a_posix_path) { + if (!a_posix_path || typeof a_posix_path !== "string") { + throw [error.ERR_INVALID_PARAM, "Invalid POSIX path"]; + } + return path.posix.normalize(a_posix_path); + }; + return obj; })(); diff --git a/core/database/foxx/api/record.js b/core/database/foxx/api/record.js index 1c3fbbf97..dd66ec597 100644 --- a/core/database/foxx/api/record.js +++ b/core/database/foxx/api/record.js @@ -66,23 +66,23 @@ class Record { /** * Generates the full path to the record as it should appear in the repository. * - * @param {object} loc - The location object which specifies the owner of the record. - * @param {string} basePath - The base path where the record is stored. - * - * @returns {string} - the path to the record or null if error + * @param {string} uid - The owner uid (e.g. 
"u/bob" or "p/myproject") + * @param {string} basePath - The base path of the repository. + * @returns {string|null} - the path to the record or null if error */ - _pathToRecord(loc, basePath) { + _pathToRecord(uid, basePath) { const path = basePath.endsWith("/") ? basePath : basePath + "/"; - if (loc.uid.charAt(0) == "u") { - return path + "user/" + loc.uid.substr(2) + "/" + this.#key; - } else if (loc.uid.charAt(0) == "p") { - return path + "project/" + loc.uid.substr(2) + "/" + this.#key; + if (uid.charAt(0) === "u") { + return path + "user/" + uid.substr(2) + "/" + this.#key; + } else if (uid.charAt(0) === "p") { + return path + "project/" + uid.substr(2) + "/" + this.#key; } else { this.#error = error.ERR_INTERNAL_FAULT; - this.#err_msg = "Provided path does not fit within supported directory "; - this.#err_msg += "structure for repository, no user or project folder has"; - this.#err_msg += " been determined for the record."; - console.log(e); + this.#err_msg = + "Provided uid does not fit within supported directory " + + "structure for repository, no user or project folder has " + + "been determined for the record. uid: " + + uid; return null; } } @@ -174,77 +174,71 @@ class Record { return !!this.#alloc; } - /** - * Validates if the provided record path is consistent with the database. - * - * @param {string} a_path - The path to validate. - * @returns {boolean} True if consistent, otherwise false. - */ isPathConsistent(a_path) { - // This function will populate the this.#loc member and the this.#alloc - // member if (!this.isManaged()) { return false; } - // If there is a new repo we need to check the path there and use that + if (!a_path.startsWith("/")) { + a_path = "/" + a_path; + } + + // If record is in flight, only check new location if (this.#loc.hasOwnProperty("new_repo") && this.#loc.new_repo) { - // Below we get the allocation associated with data item by - // 1. 
Checking if the data item is in flight, is in the process - // of being moved to a new location or new owner and using that - // oweners id. - // 2. Using the loc.uid parameter if not inflight to get the owner - // id. + const new_uid = this.#loc.new_owner ? this.#loc.new_owner : this.#loc.uid; const new_alloc = g_db.alloc.firstExample({ - _from: this.#loc.new_owner ? this.#loc.new_owner : this.#loc.uid, + _from: new_uid, _to: this.#loc.new_repo, }); - // If no allocation is found for the item throw an error - // if the paths do not align also throw an error. if (!new_alloc) { this.#error = error.ERR_PERM_DENIED; this.#err_msg = - "Permission denied, '" + this.#key + "' is not part of an allocation '"; + "Permission denied, '" + this.#key + "' is not part of an allocation'"; return false; } - this.#repo = g_db._document(this.#loc.new_repo); - - if (!this.#repo) { + const new_repo = g_db._document(this.#loc.new_repo); + if (!new_repo) { this.#error = error.ERR_INTERNAL_FAULT; this.#err_msg = - "Unable to find repo that record is meant to be allocated too, '" + + "Unable to find repo '" + this.#loc.new_repo + - "' record '" + - this.#data_id; + "' for record '" + + this.#data_id + + "'"; return false; } - // If path is missing the starting "/" add it back in - if (!a_path.startsWith("/") && this.#repo.path.startsWith("/")) { - a_path = "/" + a_path; + let new_path = this._pathToRecord(new_uid, new_repo.path); + if (new_path === a_path) { + return true; } - let stored_path = this._pathToRecord(this.#loc, this.#repo.path); - - if (!this._comparePaths(stored_path, a_path)) { - return false; - } - } else { - this.#repo = g_db._document(this.#loc._to); + this.#error = error.ERR_PERM_DENIED; + this.#err_msg = + "Record path is not consistent with repo. 
Expected: " + + new_path + + " but got: " + + a_path; + return false; + } - if (!a_path.startsWith("/") && this.#repo.path.startsWith("/")) { - a_path = "/" + a_path; - } - let stored_path = this._pathToRecord(this.#loc, this.#repo.path); + // No in-flight move — check current location + this.#repo = g_db._document(this.#loc._to); + let current_path = this._pathToRecord(this.#loc.uid, this.#repo.path); - // If there is no new repo check that the paths align - if (!this._comparePaths(stored_path, a_path)) { - return false; - } + if (current_path === a_path) { + return true; } - return true; + + this.#error = error.ERR_PERM_DENIED; + this.#err_msg = + "Record path is not consistent with repo. Expected: " + + current_path + + " but got: " + + a_path; + return false; } } diff --git a/core/database/foxx/api/repo.js b/core/database/foxx/api/repo.js index 226179e0b..9d1c5a9e2 100644 --- a/core/database/foxx/api/repo.js +++ b/core/database/foxx/api/repo.js @@ -185,6 +185,40 @@ class Repo { return PathType.UNKNOWN; } + + static resolveFromPath(file_path) { + var canonical = pathModule.normalizePOSIXPath(file_path); + + if (canonical !== file_path) { + throw [error.ERR_PERM_DENIED, "Path contains invalid sequences: " + file_path]; + } + + var repos = g_db.repo.all().toArray(); + var best_match = null; + var best_length = 0; + + for (var i = 0; i < repos.length; i++) { + var repo_path = repos[i].path; + if (repo_path.charAt(repo_path.length - 1) !== "/") { + repo_path += "/"; + } + if (canonical.indexOf(repo_path) === 0 || canonical === repo_path.slice(0, -1)) { + if (repo_path.length > best_length) { + best_match = repos[i]; + best_length = repo_path.length; + } + } + } + + if (!best_match) { + throw [ + error.ERR_PERM_DENIED, + "File path does not match any known repository: " + file_path, + ]; + } + + return new Repo(best_match._id); + } } module.exports = { Repo, PathType }; diff --git a/core/database/foxx/api/tasks.js b/core/database/foxx/api/tasks.js index 
44cdc2397..023c18484 100644 --- a/core/database/foxx/api/tasks.js +++ b/core/database/foxx/api/tasks.js @@ -892,7 +892,11 @@ var tasks_func = (function () { //console.log("taskRunRecAllocChg - do xfr"); // Transfer data step - var tokens = g_lib.getAccessToken(a_task.client); + const token_doc = new UserToken({ + user_id: a_task.client, + }).get_token(); + var tokens = UserToken.formatUserTokenForTransferTask(token_doc); + const extra_token_format = UserToken.formatUserToken(false, token_doc, false); params = { uid: a_task.client, type: a_task.type, @@ -900,6 +904,8 @@ var tasks_func = (function () { acc_tok: tokens.acc_tok, ref_tok: tokens.ref_tok, acc_tok_exp_in: tokens.acc_tok_exp_in, + token_type: extra_token_format.token_type, + scopes: extra_token_format.scopes, }; params = Object.assign(params, xfr); reply = { @@ -1276,8 +1282,11 @@ var tasks_func = (function () { case 1: //console.log("taskRunRecOwnerChg - do xfr"); // Transfer data step - - var tokens = g_lib.getAccessToken(a_task.client); + const token_doc = new UserToken({ + user_id: a_task.client, + }).get_token(); + var tokens = UserToken.formatUserTokenForTransferTask(token_doc); + const extra_token_format = UserToken.formatUserToken(false, token_doc, false); params = { uid: a_task.client, type: a_task.type, @@ -1285,6 +1294,8 @@ var tasks_func = (function () { acc_tok: tokens.acc_tok, ref_tok: tokens.ref_tok, acc_tok_exp_in: tokens.acc_tok_exp_in, + token_type: extra_token_format.token_type, + scopes: extra_token_format.scopes, }; params = Object.assign(params, xfr); reply = { diff --git a/core/database/foxx/tests/record.test.js b/core/database/foxx/tests/record.test.js index ed0c4fbcb..1725f10a0 100644 --- a/core/database/foxx/tests/record.test.js +++ b/core/database/foxx/tests/record.test.js @@ -237,7 +237,7 @@ describe("Record Class", () => { const valid_key = "1127"; const key_id = "d/" + valid_key; const owner_id = "u/john"; - const repo_id = "repo/orange-at-com"; + const repo_id = 
"repo/orange-at-org"; const new_repo_id = "repo/watermelon-at-org"; // Create nodes diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 71783fd3c..6b367565e 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -2324,9 +2324,9 @@ function sendMessageDirect(a_msg_name, a_client, a_msg_data, a_cb) { * as stable message type identifiers. * * Each map entry stores: - * - type: the protobufjs Type (for encode/decode of the inner message) - * - field_name: the envelope oneof field name (e.g. "version_request") - * - field_id: the envelope field number (used as msg_type in the frame) + * - type: the protobufjs Type (for encode/decode of the inner message) + * - field_name: the envelope oneof field name (e.g. "version_request") + * - field_id: the envelope field number (used as msg_type in the frame) * * @param {protobuf.Root} root - The loaded protobuf root containing SDMS.Envelope */ From 228483ec180158af9ead6eafa18d173083dee3d3 Mon Sep 17 00:00:00 2001 From: nedvedba <145805866+nedvedba@users.noreply.github.com> Date: Tue, 24 Feb 2026 14:33:03 -0500 Subject: [PATCH 63/65] fixed user router /update (#1877) --- core/database/foxx/api/user_router.js | 1 + 1 file changed, 1 insertion(+) diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index fce626e90..6ac6a66cc 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -315,6 +315,7 @@ router router .get("/update", function (req, res) { let result = null; + let client = null; let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; try { logger.logRequestStarted({ From 3209240417c82d080c2ab8700eb53b8bf1391793 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 25 Feb 2026 06:46:35 -0500 Subject: [PATCH 64/65] [DAPS-1872] - fix: QA, tests running until timeout, proto3 compatibility bugs, log bugs, removed proj search --- .gitlab/common.yml | 35 -- .../common/auth/query_update_request.proto | 1 + .../proto3/common/enums/dependency_dir.proto | 4 +- .../proto3/common/enums/dependency_type.proto | 6 +- .../common/enums/execution_method.proto | 6 +- core/database/foxx/api/acl_router.js | 4 +- core/database/foxx/api/admin_router.js | 8 +- core/database/foxx/api/coll_router.js | 3 +- core/database/foxx/api/config_router.js | 2 +- core/database/foxx/api/data_router.js | 55 ++- core/database/foxx/api/group_router.js | 1 - core/database/foxx/api/metrics_router.js | 5 +- core/database/foxx/api/note_router.js | 5 +- core/database/foxx/api/proj_router.js | 323 ++++++++---------- core/database/foxx/api/query_router.js | 68 ++-- core/database/foxx/api/repo_router.js | 10 +- core/database/foxx/api/schema_router.js | 24 +- core/database/foxx/api/tag_router.js | 3 +- core/database/foxx/api/task_router.js | 16 +- core/database/foxx/api/topic_router.js | 21 +- core/database/foxx/api/user_router.js | 46 +-- core/database/foxx/tests/proj_router.test.js | 84 ----- core/server/DatabaseAPI.cpp | 52 ++- core/server/DatabaseAPI.hpp | 2 - external/DataFedDependencies | 2 +- external/globus-connect-server-deploy | 2 +- tests/end-to-end/test_api_query.py | 1 + web/datafed-ws.js | 8 +- web/static/util.js | 5 +- 29 files changed, 352 insertions(+), 450 deletions(-) diff --git a/.gitlab/common.yml b/.gitlab/common.yml index f24889144..588a1990f 100644 --- a/.gitlab/common.yml +++ b/.gitlab/common.yml @@ -161,41 +161,6 @@ reports: dotenv: build.env -#.image_check: -# tags: -# - docker -# script: -# - | -# BRANCH_LOWER=$(echo "$CI_COMMIT_REF_NAME" | tr '[:upper:]' 
'[:lower:]') -# docker login "${REGISTRY}" -u "${HARBOR_USER}" -p "${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" -# FORCE_BUILD="FALSE" -# set +e -# docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${BRANCH_LOWER}:latest" -# if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; -# set -e -# if [ "${BUILD_INTERMEDIATE}" == "TRUE" ] -# then -# set +e -# docker pull --quiet "${REGISTRY}/${PROJECT}/${COMPONENT}-${INTERMEDIATE_LAYER_NAME}-${BRANCH_LOWER}:latest" -# if [ $? -eq 0 ]; then echo "Image exists"; else FORCE_BUILD="TRUE"; fi; -# set -e -# fi -# if [ "$FORCE_BUILD" == "TRUE" ] -# then -# cp .gitlab/build/force_build_${COMPONENT}_image.yml ${COMPONENT}_image.yml -# else -# cp .gitlab/build/build_${COMPONENT}_image.yml ${COMPONENT}_image.yml -# fi -# echo "REGISTRY=${REGISTRY}" >> build.env -# echo "HARBOR_USER=${HARBOR_USER}" >> build.env -# echo "HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN=${HARBOR_DATAFED_GITLAB_CI_REGISTRY_TOKEN}" >> build.env -# sed -i 's/\(HARBOR_USER=.*\)\$/\1$$/g' build.env -# artifacts: -# paths: -# - ${COMPONENT}_image.yml -# reports: -# dotenv: build.env - # The purpose of this anchor is to check that an image has been uploaded correctly # to the registry and if it has not attempt to upload it again. 
# diff --git a/common/proto3/common/auth/query_update_request.proto b/common/proto3/common/auth/query_update_request.proto index 518a93ce7..01092275d 100644 --- a/common/proto3/common/auth/query_update_request.proto +++ b/common/proto3/common/auth/query_update_request.proto @@ -10,4 +10,5 @@ message QueryUpdateRequest { string id = 1; optional string title = 2; SearchRequest query = 3; + bool replace_query = 4; } diff --git a/common/proto3/common/enums/dependency_dir.proto b/common/proto3/common/enums/dependency_dir.proto index 1cd722921..cf3b2f49a 100644 --- a/common/proto3/common/enums/dependency_dir.proto +++ b/common/proto3/common/enums/dependency_dir.proto @@ -5,6 +5,6 @@ package SDMS; option cc_enable_arenas = true; enum DependencyDir { - DEPENDENCY_DIR_IN = 0; - DEPENDENCY_DIR_OUT = 1; + DIR_IN = 0; + DIR_OUT = 1; } diff --git a/common/proto3/common/enums/dependency_type.proto b/common/proto3/common/enums/dependency_type.proto index 795eeff79..16010e48a 100644 --- a/common/proto3/common/enums/dependency_type.proto +++ b/common/proto3/common/enums/dependency_type.proto @@ -5,7 +5,7 @@ package SDMS; option cc_enable_arenas = true; enum DependencyType { - DEPENDENCY_TYPE_IS_DERIVED_FROM = 0; - DEPENDENCY_TYPE_IS_COMPONENT_OF = 1; - DEPENDENCY_TYPE_IS_NEW_VERSION_OF = 2; + DEP_IS_DERIVED_FROM = 0; + DEP_IS_COMPONENT_OF = 1; + DEP_IS_NEW_VERSION_OF = 2; } diff --git a/common/proto3/common/enums/execution_method.proto b/common/proto3/common/enums/execution_method.proto index dff667350..2cd3ef0b7 100644 --- a/common/proto3/common/enums/execution_method.proto +++ b/common/proto3/common/enums/execution_method.proto @@ -5,7 +5,7 @@ package SDMS; option cc_enable_arenas = true; enum ExecutionMethod { - EXECUTION_METHOD_UNSPECIFIED = 0; - EXECUTION_METHOD_DIRECT = 1; - EXECUTION_METHOD_DEFERRED = 2; + EXEC_UNSPECIFIED = 0; + DIRECT = 1; + DEFERRED = 2; } diff --git a/core/database/foxx/api/acl_router.js b/core/database/foxx/api/acl_router.js index 995b18020..23fad8393 
100644 --- a/core/database/foxx/api/acl_router.js +++ b/core/database/foxx/api/acl_router.js @@ -234,7 +234,7 @@ router router .get("/view", function (req, res) { - let rules = null; + let rules = []; try { logger.logRequestStarted({ client: req.queryParams.client, @@ -296,7 +296,7 @@ router .description("View current ACL on an object (data record or collection)"); router .get("/shared/list", function (req, res) { - let result = null; + let result = []; try { const client = g_lib.getUserFromClientID(req.queryParams.client); result = g_lib.getACLOwnersBySubject( diff --git a/core/database/foxx/api/admin_router.js b/core/database/foxx/api/admin_router.js index 8ef49ce9d..6cadf2b57 100644 --- a/core/database/foxx/api/admin_router.js +++ b/core/database/foxx/api/admin_router.js @@ -58,7 +58,7 @@ router let result = null; try { logger.logRequestStarted({ - client: "N/A", + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/test", @@ -82,7 +82,7 @@ router time: (t2 - t1) / 1000, }); logger.logRequestSuccess({ - client: "N/A", + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/test", @@ -92,13 +92,13 @@ router }); } catch (e) { logger.logRequestFailure({ - client: "N/A", + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/test", status: "Failure", description: "Do perf test", - extra: { execution_time_seconds: (t2 - t1) / 1000 }, + extra: { execution_time_seconds: 0 }, error: e, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/api/coll_router.js b/core/database/foxx/api/coll_router.js index ee37f4739..c5ab01b77 100644 --- a/core/database/foxx/api/coll_router.js +++ b/core/database/foxx/api/coll_router.js @@ -43,10 +43,11 @@ router var owner = client, parent_id; + let owner_id = owner._id; if (req.body.parent) { parent_id = 
g_lib.resolveCollID(req.body.parent, client); - var owner_id = g_db.owner.firstExample({ + owner_id = g_db.owner.firstExample({ _from: parent_id, })._to; if (owner_id != client._id) { diff --git a/core/database/foxx/api/config_router.js b/core/database/foxx/api/config_router.js index 0d94b2eaa..37b4d18e3 100644 --- a/core/database/foxx/api/config_router.js +++ b/core/database/foxx/api/config_router.js @@ -48,7 +48,7 @@ router routePath: basePath + "/msg/daily", status: "Failure", description: "Get message of the day", - extra: (msg.msg || "").substring(0, 10), + extra: ((msg && msg.msg) || "").substring(0, 10), error: e, }); diff --git a/core/database/foxx/api/data_router.js b/core/database/foxx/api/data_router.js index 078448e5a..457a95831 100644 --- a/core/database/foxx/api/data_router.js +++ b/core/database/foxx/api/data_router.js @@ -515,7 +515,8 @@ function recordUpdate(client, record, result) { perms |= permissions.PERM_WR_REC; } - if (data.locked || !g_lib.hasPermissions(client, data, perms)) throw error.ERR_PERM_DENIED; + if (data.locked || !permissions.hasPermissions(client, data, perms)) + throw error.ERR_PERM_DENIED; } var owner_id = g_db.owner.firstExample({ @@ -646,14 +647,14 @@ function recordUpdate(client, record, result) { for (i in data.tags) { tag = data.tags[i]; - if (!(tag in record.tags)) { + if (!record.tags.includes(tag)) { rem_tags.push(tag); } } for (i in record.tags) { tag = record.tags[i]; - if (!(tag in data.tags)) { + if (!data.tags.includes(tag)) { add_tags.push(tag); } } @@ -707,7 +708,7 @@ function recordUpdate(client, record, result) { } } - if (record.deps != undefined && (record.deps_add != undefined || record.deps_rem != undefined)) + if (record.deps != undefined && (record.dep_add != undefined || record.dep_rem != undefined)) throw [error.ERR_INVALID_PARAM, "Cannot use both dependency set and add/remove."]; var dep, @@ -1049,7 +1050,7 @@ router httpVerb: "POST", routePath: basePath + "/update/batch", status: "Failure", - 
description: `Update a batch of existing data record. RecordIDs: ${displayIds}`, + description: `Update a batch of existing data record. RecordIDs: ${displayedIds}`, extra: { count: totalCount, }, @@ -1515,7 +1516,7 @@ router router .get("/dep/graph/get", function (req, res) { - let result = null; + let result = []; try { logger.logRequestStarted({ client: req.queryParams.client, @@ -1541,7 +1542,6 @@ router notes, gen = 0; - result = []; // Get Ancestors //console.log("get ancestors"); @@ -1825,6 +1825,7 @@ router */ router .get("/path", function (req, res) { + let path = null; try { logger.logRequestStarted({ client: req.queryParams.client, @@ -1856,7 +1857,7 @@ router "Can only access data from '" + repo.domain + "' domain", ]; - var path = g_lib.computeDataPath(loc, true); + path = g_lib.computeDataPath(loc, true); res.send({ path: path, }); @@ -2389,6 +2390,15 @@ router router .post("/delete", function (req, res) { var retry = 10; + let ids = []; + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Started", + description: `Attempting to delete a total of: ${req.body.ids.length}`, + }); for (;;) { try { @@ -2416,10 +2426,11 @@ router }, action: function () { const client = g_lib.getUserFromClientID(req.queryParams.client); - var i, - id, - ids = []; + var i, id; + // Needs to be reinitialized to an empty array to avoid + // accumulating content from retries + ids = []; for (i in req.body.ids) { id = g_lib.resolveDataCollID(req.body.ids[i], client); ids.push(id); @@ -2430,8 +2441,30 @@ router res.send(result); }, }); + const preview = ids.slice(0, 5).join(", "); + const idSummary = ids.length > 5 ? 
`${preview}, ...` : preview; + logger.logRequestSuccess({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Success", + description: `Delete data items: ${idSummary}...`, + extra: { count: ids.length }, + }); + break; } catch (e) { + logger.logRequestFailure({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/delete", + status: "Failure", + description: `Attempting to delete a total of: ${req.body.ids.length}`, + extra: { retry_attempt: retry }, + error: e, + }); if (--retry == 0 || !e.errorNum || e.errorNum != 1200) { g_lib.handleException(e, res); } diff --git a/core/database/foxx/api/group_router.js b/core/database/foxx/api/group_router.js index 6a49bf3b6..9c80ccbb3 100644 --- a/core/database/foxx/api/group_router.js +++ b/core/database/foxx/api/group_router.js @@ -440,7 +440,6 @@ router extra: logExtra, }); } catch (e) { - res.send(groups); logger.logRequestFailure({ client: client?._id, correlationId: req.headers["x-correlation-id"], diff --git a/core/database/foxx/api/metrics_router.js b/core/database/foxx/api/metrics_router.js index 66c762436..76dc05e68 100644 --- a/core/database/foxx/api/metrics_router.js +++ b/core/database/foxx/api/metrics_router.js @@ -73,7 +73,7 @@ router router .get("/msg_count", function (req, res) { let client = null; - let result = null; + let result = []; try { client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ @@ -147,7 +147,7 @@ router router .get("/users/active", function (req, res) { let client = null; - let cnt = null; + let cnt = {}; try { client = req.queryParams.client ? 
g_lib.getUserFromClientID(req.queryParams.client) @@ -161,7 +161,6 @@ router description: "Get recently active users from metrics", }); - cnt = {}; var u, r, qryres = g_db diff --git a/core/database/foxx/api/note_router.js b/core/database/foxx/api/note_router.js index cefbcdff7..3fad32187 100644 --- a/core/database/foxx/api/note_router.js +++ b/core/database/foxx/api/note_router.js @@ -180,9 +180,10 @@ router }), old_state = note.state, old_type = note.type, - doc = g_db._document(ne._from), updates = {}; + doc = g_db._document(ne._from); + /* Permissions to update: Currently any admin of the subject and the creator of the annotation may make edits to the annotation. This approach is optimistic in assuming that conflicts will not arise and all parties are ethical. Eventually a mechanism will be put in place to deal with conflicts and @@ -412,7 +413,7 @@ router req.queryParams.id + " Comment ID:" + req.queryParams.comment_idx, - extra: note.new, + extra: note, error: e, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/api/proj_router.js b/core/database/foxx/api/proj_router.js index 47f3d0d53..b8ccd3a68 100644 --- a/core/database/foxx/api/proj_router.js +++ b/core/database/foxx/api/proj_router.js @@ -468,11 +468,11 @@ router status: "Failure", description: `Update project information. Project ID: ${req.queryParams.id}`, extra: { - owner: proj.new?.owner, - title: proj.new?.title - ? proj.new?.title.length > 15 - ? proj.new?.title.slice(0, 15) + "…" - : proj.new?.title + owner: proj?.new?.owner, + title: proj?.new?.title + ? proj?.new?.title.length > 15 + ? 
proj?.new?.title.slice(0, 15) + "…" + : proj?.new?.title : undefined, }, @@ -601,208 +601,167 @@ router router .get("/list", function (req, res) { - logger.logRequestStarted({ - client: req.queryParams.client, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/list", - status: "Started", - description: `List projects`, - }); - - const client = g_lib.getUserFromClientID(req.queryParams.client); - var qry, - result, - count = - (req.queryParams.as_owner ? 1 : 0) + - (req.queryParams.as_admin ? 1 : 0) + - (req.queryParams.as_member ? 1 : 0); - - if (count) { - var comma = false; - - if (count > 1) qry = "for i in union(("; - else qry = ""; - - if (req.queryParams.as_owner) { - qry += "for i in 1..1 inbound @user owner filter IS_SAME_COLLECTION('p',i)"; - if (count > 1) qry += " return { _id: i._id, title: i.title, owner: i.owner }"; - comma = true; - } - - if (!count || req.queryParams.as_admin) { - qry += - (comma ? "),(" : "") + - "for i in 1..1 inbound @user admin filter IS_SAME_COLLECTION('p',i)"; - if (count > 1) - qry += " return { _id: i._id, title: i.title, owner: i.owner, creator: @user }"; - comma = true; - } - - if (req.queryParams.as_member) { - qry += - (comma ? 
"),(" : "") + - "for i,e,p in 2..2 inbound @user member, outbound owner filter p.vertices[1].gid == 'members'"; - if (count > 1) qry += " return { _id: i._id, title: i.title, owner: i.owner }"; - } - - if (count > 1) qry += "))"; - } else { - qry = "for i in p"; - } - - qry += " sort i."; - - switch (req.queryParams.sort) { - case g_lib.SORT_ID: - qry += "_id"; - break; - case g_lib.SORT_TITLE: - qry += "title"; - break; - case g_lib.SORT_TIME_CREATE: - qry += "ct"; - break; - case g_lib.SORT_TIME_UPDATE: - qry += "ut"; - break; - default: - qry += "_id"; - break; - } - - if (req.queryParams.sort_rev) qry += " desc"; - - var user_id; - if (req.queryParams.subject) { - permissions.ensureAdminPermUser(client, req.queryParams.subject); - } else user_id = client._id; - - if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { - qry += " limit " + req.queryParams.offset + ", " + req.queryParams.count; - qry += " return { id: i._id, title: i.title, owner: i.owner, creator: i.creator }"; - //console.log("proj list qry:",qry); - result = g_db._query( - qry, - count - ? { - user: user_id, - } - : {}, - {}, - { - fullCount: true, - }, - ); - var tot = result.getExtra().stats.fullCount; - result = result.toArray(); - result.push({ - paging: { - off: req.queryParams.offset, - cnt: req.queryParams.count, - tot: tot, - }, - }); - } else { - qry += " return { id: i._id, title: i.title, owner: i.owner, creator: i.creator }"; - //console.log("proj list qry:",qry); - result = g_db._query( - qry, - count - ? 
{ - user: user_id, - } - : {}, - ); - } - - //res.send( g_db._query( qry, { user: client._id })); - res.send(result); - logger.logRequestSuccess({ - client: req.queryParams.client, - correlationId: req.headers["x-correlation-id"], - httpVerb: "GET", - routePath: basePath + "/list", - status: "Success", - description: `List projects`, - extra: { NumOfProjs: tot }, - }); - }) - .queryParam("client", joi.string().required(), "Client ID") - .queryParam("subject", joi.string().optional(), "Subject (user) ID") - .queryParam("as_owner", joi.bool().optional(), "List projects owned by client/subject") - .queryParam("as_admin", joi.bool().optional(), "List projects administered by client/subject") - .queryParam( - "as_member", - joi.bool().optional(), - "List projects where client is a member/subject", - ) - .queryParam("sort", joi.number().optional(), "Sort field (default = id)") - .queryParam("sort_rev", joi.bool().optional(), "Sort in reverse order") - .queryParam("offset", joi.number().optional(), "Offset") - .queryParam("count", joi.number().optional(), "Count") - .summary("List projects") - .description( - "List projects. If no options are provided, lists all projects associated with client.", - ); - -router - .get("/search", function (req, res) { - let result = null; - let extra_log = null; - const rawQuery = typeof req.queryParams.query === "string" ? req.queryParams.query : ""; - const safeQuerySnippet = - rawQuery.length > 200 ? 
rawQuery.slice(0, 200) + "…[truncated]" : rawQuery; + let tot = null; try { logger.logRequestStarted({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", - routePath: basePath + "/search", + routePath: basePath + "/list", status: "Started", - description: `Find all projects that match query: ${safeQuerySnippet}`, + description: `List projects`, }); - g_lib.getUserFromClientID(req.queryParams.client); + const client = g_lib.getUserFromClientID(req.queryParams.client); + var qry, + result, + count = + (req.queryParams.as_owner ? 1 : 0) + + (req.queryParams.as_admin ? 1 : 0) + + (req.queryParams.as_member ? 1 : 0); + + if (count) { + var comma = false; + + if (count > 1) qry = "for i in union(("; + else qry = ""; + + if (req.queryParams.as_owner) { + qry += "for i in 1..1 inbound @user owner filter IS_SAME_COLLECTION('p',i)"; + if (count > 1) qry += " return { _id: i._id, title: i.title, owner: i.owner }"; + comma = true; + } + + if (!count || req.queryParams.as_admin) { + qry += + (comma ? "),(" : "") + + "for i in 1..1 inbound @user admin filter IS_SAME_COLLECTION('p',i)"; + if (count > 1) + qry += + " return { _id: i._id, title: i.title, owner: i.owner, creator: @user }"; + comma = true; + } - result = g_db._query(req.queryParams.query, {}); - res.send(result); - extra_log = { - documents: result._documents ? result._documents.slice(0, 10) : [], // first 10 IDs only - countTotal: result?._countTotal, - countQuery: result?._countQuery, - skip: result?._skip, - limit: result?._limit, - cached: result?._cached, - }; + if (req.queryParams.as_member) { + qry += + (comma ? 
"),(" : "") + + "for i,e,p in 2..2 inbound @user member, outbound owner filter p.vertices[1].gid == 'members'"; + if (count > 1) qry += " return { _id: i._id, title: i.title, owner: i.owner }"; + } + if (count > 1) qry += "))"; + } else { + qry = "for i in p"; + } + + qry += " sort i."; + + switch (req.queryParams.sort) { + case g_lib.SORT_ID: + qry += "_id"; + break; + case g_lib.SORT_TITLE: + qry += "title"; + break; + case g_lib.SORT_TIME_CREATE: + qry += "ct"; + break; + case g_lib.SORT_TIME_UPDATE: + qry += "ut"; + break; + default: + qry += "_id"; + break; + } + + if (req.queryParams.sort_rev) qry += " desc"; + + let user_id; + if (req.queryParams.subject) { + permissions.ensureAdminPermUser(client, req.queryParams.subject); + user_id = req.queryParams.subject; + } else user_id = client._id; + + if (req.queryParams.offset != undefined && req.queryParams.count != undefined) { + qry += " limit " + req.queryParams.offset + ", " + req.queryParams.count; + qry += " return { id: i._id, title: i.title, owner: i.owner, creator: i.creator }"; + //console.log("proj list qry:",qry); + result = g_db._query( + qry, + count + ? { + user: user_id, + } + : {}, + {}, + { + fullCount: true, + }, + ); + tot = result.getExtra().stats.fullCount; + result = result.toArray(); + result.push({ + paging: { + off: req.queryParams.offset, + cnt: req.queryParams.count, + tot: tot, + }, + }); + } else { + qry += " return { id: i._id, title: i.title, owner: i.owner, creator: i.creator }"; + //console.log("proj list qry:",qry); + result = g_db._query( + qry, + count + ? 
{ + user: user_id, + } + : {}, + ); + } + + //res.send( g_db._query( qry, { user: client._id })); + res.send(result); logger.logRequestSuccess({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", - routePath: basePath + "/search", + routePath: basePath + "/list", status: "Success", - description: `Find all projects that match query: ${safeQuerySnippet}`, - extra: extra_log, + description: `List projects`, + extra: { NumOfProjs: tot }, }); } catch (e) { logger.logRequestFailure({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", - routePath: basePath + "/search", + routePath: basePath + "/list", status: "Failure", - description: `Find all projects that match query: ${safeQuerySnippet}`, - extra: extra_log, + description: `List projects`, + extra: { NumOfProjs: tot }, error: e, }); g_lib.handleException(e, res); } }) .queryParam("client", joi.string().required(), "Client ID") - .queryParam("query", joi.string().required(), "Query") - .summary("Find all projects that match query") - .description("Find all projects that match query"); + .queryParam("subject", joi.string().optional(), "Subject (user) ID") + .queryParam("as_owner", joi.bool().optional(), "List projects owned by client/subject") + .queryParam("as_admin", joi.bool().optional(), "List projects administered by client/subject") + .queryParam( + "as_member", + joi.bool().optional(), + "List projects where client is a member/subject", + ) + .queryParam("sort", joi.number().optional(), "Sort field (default = id)") + .queryParam("sort_rev", joi.bool().optional(), "Sort in reverse order") + .queryParam("offset", joi.number().optional(), "Offset") + .queryParam("count", joi.number().optional(), "Count") + .summary("List projects") + .description( + "List projects. 
If no options are provided, lists all projects associated with client.", + ); router .post("/delete", function (req, res) { @@ -826,7 +785,7 @@ router action: function () { const client = g_lib.getUserFromClientID(req.queryParams.client); - var result = g_tasks.taskInitProjDelete(client, req.body.ids); + result = g_tasks.taskInitProjDelete(client, req.body.ids); res.send(result); }, @@ -908,12 +867,12 @@ router extra: { role: role }, }); } catch (e) { - logger.logRequestSuccess({ + logger.logRequestFailure({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/get_role", - status: "Success", + status: "Failure", description: `Get client/subject project role. ID: ${req.queryParams.id}`, extra: { role: role }, error: e, diff --git a/core/database/foxx/api/query_router.js b/core/database/foxx/api/query_router.js index fb174caee..ff679bb5b 100644 --- a/core/database/foxx/api/query_router.js +++ b/core/database/foxx/api/query_router.js @@ -19,6 +19,14 @@ router .post("/create", function (req, res) { let result = undefined; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/create", + status: "Started", + description: "Create Query", + }); g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "admin"], @@ -26,14 +34,6 @@ router }, action: function () { const client = g_lib.getUserFromClientID(req.queryParams.client); - logger.logRequestStarted({ - client: req.queryParams.client, - correlationId: req.headers["x-correlation-id"], - httpVerb: "POST", - routePath: basePath + "/create", - status: "Started", - description: "Create Query", - }); // Check max number of saved queries if (client.max_sav_qry >= 0) { @@ -82,7 +82,7 @@ router delete qry.qry_end; delete qry.qry_filter; delete qry.params; - delete qry.lmit; + delete qry.limit; result = qry; }, @@ -135,6 +135,15 @@ router 
.post("/update", function (req, res) { let result = undefined; try { + logger.logRequestStarted({ + client: req.queryParams.client, + correlationId: req.headers["x-correlation-id"], + httpVerb: "POST", + routePath: basePath + "/update", + status: "Started", + description: "Update a saved query", + }); + g_db._executeTransaction({ collections: { read: ["u", "uuid", "accn", "admin"], @@ -142,15 +151,6 @@ router }, action: function () { const client = g_lib.getUserFromClientID(req.queryParams.client); - logger.logRequestStarted({ - client: req.queryParams.client, - correlationId: req.headers["x-correlation-id"], - httpVerb: "POST", - routePath: basePath + "/update", - status: "Started", - description: "Update a saved query", - }); - var qry = g_db.q.document(req.body.id); if (client._id != qry.owner && !client.is_admin) { @@ -189,7 +189,7 @@ router delete qry.qry_end; delete qry.qry_filter; delete qry.params; - delete qry.lmit; + delete qry.limit; result = qry; }, @@ -265,7 +265,7 @@ router delete qry.qry_end; delete qry.qry_filter; delete qry.params; - delete qry.lmit; + delete qry.limit; res.send(qry); logger.logRequestSuccess({ @@ -345,6 +345,7 @@ router extra: req.queryParams.ids[i], }); } + res.send(); } catch (e) { logger.logRequestFailure({ client: req.queryParams.client, @@ -445,6 +446,10 @@ router .description("List client saved queries"); function execQuery(client, mode, published, orig_query) { + // Make sure we are always dealing with strings. 
+ if (typeof mode === "string" && mode in g_lib) { + mode = g_lib[mode]; + } var col_chk = true, ctxt = client._id; let query = { @@ -471,7 +476,7 @@ function execQuery(client, mode, published, orig_query) { }, ) .toArray(); - if (!query.params.cols) { + if (!query.params.cols.length) { throw [ error.ERR_PERM_DENIED, "No access to user '" + query.params.owner + "' data/collections.", @@ -505,7 +510,7 @@ function execQuery(client, mode, published, orig_query) { }, ) .toArray(); - if (!query.params.cols) { + if (!query.params.cols.length) { throw [ error.ERR_PERM_DENIED, "No access to project '" + query.params.owner + "'.", @@ -656,6 +661,15 @@ router var qry = g_db.q.document(req.queryParams.id); + // Legacy query documents may have `params` stored as a JSON string + // rather than an object, because the original schema validation + // (joi.any()) accepted both. New documents are stored as objects + // (joi.object()), but old records remain until migrated. + // TODO: Remove after backfilling existing queries in ArangoDB. + if (typeof qry.params === "string") { + qry.params = JSON.parse(qry.params); + } + if (client._id != qry.owner && !client.is_admin) { throw error.ERR_PERM_DENIED; } @@ -675,7 +689,10 @@ router routePath: basePath + "/exec", status: "Success", description: "Execute specified queries", - extra: results, + extra: { + count: Array.isArray(results) ? results.length : undefined, + query_id: req.queryParams.id, + }, }); } catch (e) { logger.logRequestFailure({ @@ -685,7 +702,10 @@ router routePath: basePath + "/exec", status: "Failure", description: "Execute specified queries", - extra: results, + extra: { + count: Array.isArray(results) ? 
results.length : undefined, + query_id: req.queryParams.id, + }, error: e, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/api/repo_router.js b/core/database/foxx/api/repo_router.js index 27b8b4d94..64a913490 100644 --- a/core/database/foxx/api/repo_router.js +++ b/core/database/foxx/api/repo_router.js @@ -171,9 +171,9 @@ router status: "Failure", description: `View repo server record: ${req.queryParams.id}`, extra: { - type: repo.type, - capacity: repo.capacity, - admins: repo.admins, + type: repo?.type, + capacity: repo?.capacity, + admins: repo?.admins, }, error: e, }); @@ -917,7 +917,7 @@ router permissions.ensureAdminPermRepo(client, req.queryParams.repo); result = getAllocStats(req.queryParams.repo, req.queryParams.subject); res.send(result); - logger.logRequestStarted({ + logger.logRequestSuccess({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", @@ -1211,7 +1211,7 @@ router if (req.queryParams.subject) { if (req.queryParams.subject.startsWith("p/")) { - if (!g_db._exists(subject_id)) + if (!g_db._exists(req.queryParams.subject)) throw [ error.ERR_NOT_FOUND, "Project, " + req.queryParams.subject + ", not found", diff --git a/core/database/foxx/api/schema_router.js b/core/database/foxx/api/schema_router.js index ee5356207..7dc84fee8 100644 --- a/core/database/foxx/api/schema_router.js +++ b/core/database/foxx/api/schema_router.js @@ -333,11 +333,11 @@ router status: "Failure", description: `Update schema. Schema ID: ${req.queryParams.id}`, extra: { - id: sch_new.id, - own_id: sch_new.own_id, - pub: sch_new.pub, + id: sch_new?.id, + own_id: sch_new?.own_id, + pub: sch_new?.pub, sys: req.body?.sys ?? false, - ver: sch_new.ver, + ver: sch_new?.ver, }, error: e, }); @@ -499,11 +499,11 @@ router status: "Failure", description: `Revise schema. 
Schema ID: ${req.queryParams.id}`, extra: { - own_id: sch_new.own_id, - own_nm: sch_new.own_nm, - id: sch_new.id, - pub: req.body.pub, - sys: req.body.sys, + own_id: sch_new?.own_id, + own_nm: sch_new?.own_nm, + id: sch_new?.id, + pub: req.body?.pub, + sys: req.body?.sys, }, error: e, }); @@ -608,7 +608,7 @@ router routePath: basePath + "/delete", status: "Failure", description: `Delete schema. Schema ID: ${req.queryParams.id}`, - extra: { deleted: sch_old._id }, + extra: { deleted: sch_old?._id }, }); g_lib.handleException(e, res); } @@ -700,8 +700,8 @@ router status: "Failure", description: `View schema. Schema ID: ${req.queryParams.id}`, extra: { - pub: sch.pub, - sys: sch.sys, + pub: sch?.pub, + sys: sch?.sys, }, }); g_lib.handleException(e, res); diff --git a/core/database/foxx/api/tag_router.js b/core/database/foxx/api/tag_router.js index 575d04f3a..ea30323d9 100644 --- a/core/database/foxx/api/tag_router.js +++ b/core/database/foxx/api/tag_router.js @@ -19,6 +19,7 @@ router let client = null; let result = null; let tot = null; + let name = null; try { client = req.queryParams.client ? 
g_lib.getUserFromClientID(req.queryParams.client) @@ -31,7 +32,7 @@ router status: "Started", description: `Search for tags by name (${req.queryParams?.name?.trim()})`, }); - var name = req.queryParams.name.trim(); + name = req.queryParams.name.trim(); if (name.length < 3) throw [error.ERR_INVALID_PARAM, "Input is too short for tag search."]; diff --git a/core/database/foxx/api/task_router.js b/core/database/foxx/api/task_router.js index 292beb922..e9ae12dd1 100644 --- a/core/database/foxx/api/task_router.js +++ b/core/database/foxx/api/task_router.js @@ -347,6 +347,7 @@ router throw [error.ERR_IN_USE, "Cannot delete task that is still scheduled."]; g_lib.graph.task.remove(req.queryParams.task_id); + res.send(); logger.logRequestSuccess({ client: req?.queryParams?.task_id, correlationId: req.headers["x-correlation-id"], @@ -357,7 +358,6 @@ router extra: req.queryParams.task_id, }); } catch (e) { - g_lib.handleException(e, res); logger.logRequestFailure({ client: req?.queryParams?.task_id, correlationId: req.headers["x-correlation-id"], @@ -368,6 +368,7 @@ router extra: "undefined", error: e, }); + g_lib.handleException(e, res); } }) .queryParam("task_id", joi.string().required(), "Task ID") @@ -380,7 +381,7 @@ router try { const client = g_lib.getUserFromClientID(req.queryParams.client); logger.logRequestStarted({ - client: req?.queryParams?.task_id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list", @@ -422,7 +423,7 @@ router res.send(result); logger.logRequestSuccess({ - client: req?.queryParams?.task_id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list", @@ -435,7 +436,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: req?.queryParams?.task_id, + client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/list", @@ -471,7 +472,7 
@@ router try { result = []; logger.logRequestStarted({ - client: req?.queryParams?.task_id, + client: "system", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/reload", @@ -495,7 +496,7 @@ router res.send(result); logger.logRequestSuccess({ - client: req?.queryParams?.task_id, + client: "system", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/reload", @@ -505,7 +506,7 @@ router }); } catch (e) { logger.logRequestFailure({ - client: req?.queryParams?.task_id, + client: "system", correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/reload", @@ -549,6 +550,7 @@ router ); }, }); + res.send(); logger.logRequestSuccess({ client: "undefined", correlationId: req.headers["x-correlation-id"], diff --git a/core/database/foxx/api/topic_router.js b/core/database/foxx/api/topic_router.js index b5b531505..ef284d292 100644 --- a/core/database/foxx/api/topic_router.js +++ b/core/database/foxx/api/topic_router.js @@ -16,9 +16,7 @@ module.exports = router; router .get("/list/topics", function (req, res) { - let client = req.queryParams.client - ? g_lib.getUserFromClientID(req.queryParams.client) - : undefined; + let client = null; let result = null; try { client = g_lib.getUserFromClientID(req.queryParams.client); @@ -109,11 +107,12 @@ router router .get("/view", function (req, res) { - let client = req.queryParams.client - ? g_lib.getUserFromClientID(req.queryParams.client) - : undefined; + let client = null; let topic_extra = undefined; try { + client = req.queryParams.client + ? g_lib.getUserFromClientID(req.queryParams.client) + : undefined; logger.logRequestStarted({ client: client?._id, correlationId: req.headers["x-correlation-id"], @@ -166,13 +165,14 @@ router router .get("/search", function (req, res) { - let client = req.queryParams.client - ? 
g_lib.getUserFromClientID(req.queryParams.client) - : undefined; - let result = null; + let client = null; + let result = []; const phrase = req.queryParams.phrase; const shortPhrase = phrase.length > 10 ? phrase.slice(0, 10) + "..." : phrase; try { + client = req.queryParams.client + ? g_lib.getUserFromClientID(req.queryParams.client) + : undefined; logger.logRequestStarted({ client: client?._id, correlationId: req.headers["x-correlation-id"], @@ -194,7 +194,6 @@ router path, op = false; - result = []; if (tokens.length == 0) throw [error.ERR_INVALID_PARAM, "Invalid topic search phrase."]; it = 0; diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index 6ac6a66cc..c96a17803 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -291,7 +291,7 @@ router routePath: basePath + "/create", status: "Failure", description: "Create new user entry", - extra: user.new.uid, + extra: user?.new?.uid, error: e, }); g_lib.handleException(e, res); @@ -314,6 +314,7 @@ router router .get("/update", function (req, res) { + let client = null; let result = null; let client = null; let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; @@ -401,15 +402,6 @@ router delete user.new.refresh; result = [user.new]; - - const { is_admin, max_coll, max_proj, max_sav_qry } = user.new; - - extra_log_info = { - is_admin, - max_coll, - max_proj, - max_sav_qry, - }; }, }); res.send(result); @@ -726,6 +718,7 @@ router router .get("/keys/get", function (req, res) { let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; + let user = null; try { if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) @@ -734,7 +727,7 @@ router "No such user '" + req.queryParams.subject + "'", ]; - let user = g_db.u.document({ + user = g_db.u.document({ _id: req.queryParams.subject, }); logger.logRequestStarted({ @@ -746,7 +739,7 @@ router description: `Get user public and private keys. ${sub}`, }); } else { - let user = g_lib.getUserFromClientID(req.queryParams.client); + user = g_lib.getUserFromClientID(req.queryParams.client); } if (!user.pub_key || !user.priv_key) { @@ -1037,6 +1030,7 @@ router router .get("/token/get", function (req, res) { let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; + let user = null; try { const collection_token = UserToken.validateRequestParams(req.queryParams); // TODO: collection type determines logic when mapped vs HA @@ -1049,11 +1043,11 @@ router "No such user '" + req.queryParams.subject + "'", ]; - var user = g_db.u.document({ + user = g_db.u.document({ _id: req.queryParams.subject, }); } else { - var user = g_lib.getUserFromClientID(req.queryParams.client); + user = g_lib.getUserFromClientID(req.queryParams.client); } logger.logRequestStarted({ @@ -1123,6 +1117,7 @@ router router .get("/token/get/access", function (req, res) { let sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; + let user = null; try { if (req.queryParams.subject) { if (!g_db.u.exists(req.queryParams.subject)) @@ -1130,11 +1125,11 @@ router error.ERR_INVALID_PARAM, "No such user '" + req.queryParams.subject + "'", ]; - let user = g_db.u.document({ + user = g_db.u.document({ _id: req.queryParams.subject, }); } else { - let user = g_lib.getUserFromClientID(req.queryParams.client); + user = g_lib.getUserFromClientID(req.queryParams.client); } logger.logRequestStarted({ client: req.queryParams.client, @@ -1230,8 +1225,9 @@ router router .get("/view", function (req, res) { let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; + let client = null; try { - const client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); + client = g_lib.getUserFromClientID_noexcept(req.queryParams.client); logger.logRequestStarted({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1330,7 +1326,6 @@ router extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, }); //req.queryParams.details ? } catch (e) { - g_lib.handleException(e, res); logger.logRequestFailure({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1338,9 +1333,10 @@ router routePath: basePath + "/view", status: "Failure", description: `View User Information. Subject: ${sub}`, - extra: `uid=${user.uid}, is_admin=${!!client?.is_admin}`, + extra: `uid=${user?.uid}, is_admin=${!!client?.is_admin}`, error: e, }); + g_lib.handleException(e, res); } }) .queryParam("client", joi.string().required(), "Client ID") @@ -1486,7 +1482,9 @@ Note: must delete ALL data records and projects owned by the user being deleted router .get("/delete", function (req, res) { let user_id = null; + let sub = null; try { + sub = req.queryParams.subject ? 
req.queryParams.subject : req.queryParams.client; logger.logRequestStarted({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1611,6 +1609,7 @@ router router .get("/ident/list", function (req, res) { let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; + let client = null; let extra_log = []; try { client = g_lib.getUserFromClientID(req.queryParams.client); @@ -1681,6 +1680,7 @@ router router .get("/ident/add", function (req, res) { + let client = null; let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { logger.logRequestStarted({ @@ -1714,7 +1714,7 @@ router g_db._exists({ _id: "uuid/" + req.queryParams.ident, }) - ) + ) { logger.logRequestSuccess({ client: req.queryParams.client, correlationId: req.headers["x-correlation-id"], @@ -1724,8 +1724,8 @@ router description: `Add new linked identity. Subject: ${sub}`, extra: req.queryParams.ident, }); - - return; + return; + } id = g_db.uuid.save( { _key: req.queryParams.ident, @@ -1965,7 +1965,7 @@ router correlationId: req.headers["x-correlation-id"], httpVerb: "GET", routePath: basePath + "/ep/set", - status: "Started", + status: "Success", description: "Set recent end-points", extra: client.eps, }); diff --git a/core/database/foxx/tests/proj_router.test.js b/core/database/foxx/tests/proj_router.test.js index 3e854f84c..1d8e46272 100644 --- a/core/database/foxx/tests/proj_router.test.js +++ b/core/database/foxx/tests/proj_router.test.js @@ -367,56 +367,6 @@ describe("unit_proj_router: test project create endpoint", () => { expect(ids).to.include.members(["p/proj1", "p/proj2", "p/proj3"]); }); - it("should search projects using a provided AQL query", () => { - // ------------------------------------------------------------------ - // Arrange - // ------------------------------------------------------------------ - db.u.save({ _key: "search_user", is_admin: false }); - - db.p.save({ - _key: "search_proj1", - 
title: "Alpha Project", - desc: "First searchable project", - ct: 1, - ut: 1, - owner: "u/search_user", - }); - - db.p.save({ - _key: "search_proj2", - title: "Beta Project", - desc: "Second searchable project", - ct: 1, - ut: 1, - owner: "u/search_user", - }); - - // AQL query passed directly to /search - const aql = "FOR p IN p FILTER p.title LIKE '%Project%' RETURN p._id"; - - const url = - `${proj_base_url}/search` + - `?client=u/search_user` + - `&query=${encodeURIComponent(aql)}`; - - // ------------------------------------------------------------------ - // Act - // ------------------------------------------------------------------ - const response = request.get(url, { - headers: { "x-correlation-id": "test-proj-search" }, - }); - - // ------------------------------------------------------------------ - // Assert - // ------------------------------------------------------------------ - expect(response.status).to.equal(200); - - const body = JSON.parse(response.body); - - expect(body).to.be.an("array"); - expect(body).to.include.members(["p/search_proj1", "p/search_proj2"]); - }); - it("should enqueue a project delete task when client is authorized", () => { // ------------------------------------------------------------------ // Arrange @@ -564,38 +514,4 @@ describe("unit_proj_router: test project create endpoint", () => { body = JSON.parse(response.body); expect(body.role).to.equal(1); // member }); - - it("should handle malformed AQL for /prj/search without crashing and return an error response", () => { - // ------------------------------------------------------------------ - // Arrange - // ------------------------------------------------------------------ - db.u.save({ _key: "search_user_malformed", is_admin: false }); - - const client = "search_user_malformed"; - - // Intentionally malformed AQL (missing RETURN and invalid syntax) - const malformedBody = { - client, - aql: "FOR p IN p FILTER p.title == @title INVALID_SYNTAX", - bindVars: { - title: 
"Alpha Project", - }, - }; - - // ------------------------------------------------------------------ - // Act - // ------------------------------------------------------------------ - const response = request.get("/prj/search", malformedBody); - - // ------------------------------------------------------------------ - // Assert - // ------------------------------------------------------------------ - // Expect a 400-series error (bad request / invalid query) and a JSON error payload - expect(response.status).to.be.within(400, 499); - - const body = JSON.parse(response.body); - expect(body).to.have.property("error", true); - expect(body).to.have.property("code"); - expect(body).to.have.property("errorMessage"); - }); }); diff --git a/core/server/DatabaseAPI.cpp b/core/server/DatabaseAPI.cpp index 4dd2be945..cdf752e6c 100644 --- a/core/server/DatabaseAPI.cpp +++ b/core/server/DatabaseAPI.cpp @@ -808,16 +808,6 @@ void DatabaseAPI::projGetRole(const SDMS::ProjectGetRoleRequest &a_request, a_reply.set_role((ProjectRole)(unsigned short)obj.getNumber("role")); } -void DatabaseAPI::projSearch(const std::string &a_query, - SDMS::ProjectDataReply &a_reply, - LogContext log_context) { - Value result; - - dbGet("prj/search", {{"query", a_query}}, result, log_context); - - setProjectData(a_reply, result, log_context); -} - void DatabaseAPI::setProjectData(SDMS::ProjectDataReply &a_reply, const Value &a_result, LogContext log_context) { @@ -1760,8 +1750,9 @@ void DatabaseAPI::queryCreate(const SDMS::QueryCreateRequest &a_request, google::protobuf::util::JsonPrintOptions options; string query_json; - options.always_print_enums_as_ints = true; + options.always_print_enums_as_ints = false; options.preserve_proto_field_names = true; + options.always_print_primitive_fields = true; auto stat = google::protobuf::util::MessageToJsonString(a_request.query(), &query_json, options); @@ -1796,19 +1787,45 @@ void DatabaseAPI::queryUpdate(const SDMS::QueryUpdateRequest &a_request, } if 
(a_request.has_query()) { - string qry_begin, qry_end, qry_filter, params; + SDMS::SearchRequest final_query; + if (a_request.replace_query()) { + // Full replacement — use incoming query as-is + final_query.CopyFrom(a_request.query()); + } else { + // Partial update — merge incoming onto existing + Value existing; + dbGet("qry/view", {{"id", a_request.id()}}, existing, log_context); + + auto parse_stat = google::protobuf::util::JsonStringToMessage( + existing.asObject().getValue("query").toString(), &final_query); + if (!parse_stat.ok()) { + EXCEPT(1, "Failed to parse existing query"); + } + + if (a_request.query().coll_size() > 0) { + final_query.clear_coll(); + } + if (a_request.query().tags_size() > 0) { + final_query.clear_tags(); + } + if (a_request.query().cat_tags_size() > 0) { + final_query.clear_cat_tags(); + } + final_query.MergeFrom(a_request.query()); + } - uint32_t cnt = parseSearchRequest(a_request.query(), qry_begin, qry_end, + // Re-generate AQL from the complete merged query + string qry_begin, qry_end, qry_filter, params; + uint32_t cnt = parseSearchRequest(final_query, qry_begin, qry_end, qry_filter, params, log_context); google::protobuf::util::JsonPrintOptions options; string query_json; - - options.always_print_enums_as_ints = true; + options.always_print_enums_as_ints = false; options.preserve_proto_field_names = true; - + options.always_print_primitive_fields = true; auto stat = google::protobuf::util::MessageToJsonString( - a_request.query(), &query_json, options); + final_query, &query_json, options); if (!stat.ok()) { EXCEPT(1, "Invalid search request"); } @@ -1823,7 +1840,6 @@ void DatabaseAPI::queryUpdate(const SDMS::QueryUpdateRequest &a_request, string body = payload.dump(-1, ' ', true); dbPost("qry/update", {}, &body, result, log_context); - setQueryData(a_reply, result, log_context); } diff --git a/core/server/DatabaseAPI.hpp b/core/server/DatabaseAPI.hpp index f0df10a51..fa9327c50 100644 --- a/core/server/DatabaseAPI.hpp +++ 
b/core/server/DatabaseAPI.hpp @@ -105,8 +105,6 @@ class DatabaseAPI { SDMS::ProjectDataReply &a_reply, LogContext log_context); void projList(const SDMS::ProjectListRequest &a_request, SDMS::ListingReply &a_reply, LogContext log_context); - void projSearch(const std::string &a_query, SDMS::ProjectDataReply &a_reply, - LogContext log_context); void projGetRole(const SDMS::ProjectGetRoleRequest &a_request, SDMS::ProjectGetRoleReply &a_reply, LogContext log_context); diff --git a/external/DataFedDependencies b/external/DataFedDependencies index fe59a393f..e0319a2f2 160000 --- a/external/DataFedDependencies +++ b/external/DataFedDependencies @@ -1 +1 @@ -Subproject commit fe59a393f54d3aa1b8bf551f97d274b762bf93d2 +Subproject commit e0319a2f2e70d901180c730ebd5b10b10d8fce93 diff --git a/external/globus-connect-server-deploy b/external/globus-connect-server-deploy index 436b396c4..ff7167860 160000 --- a/external/globus-connect-server-deploy +++ b/external/globus-connect-server-deploy @@ -1 +1 @@ -Subproject commit 436b396c4da6d141c9c0534b297b5e43cc9ac35c +Subproject commit ff7167860345e9b994110dfabdb251fe4dea8c00 diff --git a/tests/end-to-end/test_api_query.py b/tests/end-to-end/test_api_query.py index cf1894a09..174d0d4c6 100755 --- a/tests/end-to-end/test_api_query.py +++ b/tests/end-to-end/test_api_query.py @@ -191,6 +191,7 @@ def test_query_create_delete(self): if model.alias.startswith("adamantium"): material = model.alias time.sleep(self._timeout) + count = count + 1 print(f"Query found {material}") diff --git a/web/datafed-ws.js b/web/datafed-ws.js index 6b367565e..e12386156 100755 --- a/web/datafed-ws.js +++ b/web/datafed-ws.js @@ -1020,12 +1020,6 @@ app.get("/api/prj/list", (a_req, a_resp) => { }); }); -app.post("/api/prj/search", (a_req, a_resp) => { - sendMessage("ProjectSearchRequest", a_req.body, a_req, a_resp, function (reply) { - a_resp.send(reply.item ? 
reply.item : []); - }); -}); - app.get("/api/grp/create", (a_req, a_resp) => { var params = { group: { @@ -1114,7 +1108,7 @@ app.post("/api/query/create", (a_req, a_resp) => { }); app.post("/api/query/update", (a_req, a_resp) => { - var params = { id: a_req.query.id }; + var params = { id: a_req.query.id, replaceQuery: true }; if (a_req.query.title) params.title = a_req.query.title; if (a_req.body) params.query = a_req.body; diff --git a/web/static/util.js b/web/static/util.js index ab19b6973..6cff31350 100644 --- a/web/static/util.js +++ b/web/static/util.js @@ -51,14 +51,11 @@ export function getUpdatedValue(a_new_val, a_old_obj, a_new_obj, a_field) { export function getUpdatedValueJSON(a_new_val, a_old_obj, a_new_obj, a_field) { var tmp = a_new_val.trim(), old = a_old_obj[a_field]; - if (old === undefined && tmp.length) { + if ((!old || old === undefined) && tmp.length) { a_new_obj[a_field] = tmp; } else if (tmp.length) { - // Must compare values - have to restringify both b/c formats may differ with same content - // TODO - This should be a deep compare due to possibly inconsistent object arrangement var oldjs = JSON.stringify(JSON.parse(old)), newjs = JSON.stringify(JSON.parse(tmp)); - if (oldjs != newjs) { a_new_obj[a_field] = tmp; } From ffa8322f1cf325608b8091185b05aa3d61c5c956 Mon Sep 17 00:00:00 2001 From: Joshua S Brown Date: Wed, 25 Feb 2026 07:58:18 -0500 Subject: [PATCH 65/65] [DAPS-1878] - release: version number update (#1878) --- cmake/Version.cmake | 24 ++++++++++++------------ core/database/foxx/api/user_router.js | 1 - 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/cmake/Version.cmake b/cmake/Version.cmake index 6e5cdc7c5..fb094d3c2 100644 --- a/cmake/Version.cmake +++ b/cmake/Version.cmake @@ -1,44 +1,44 @@ -set(DATAFED_RELEASE_YEAR 2025) -set(DATAFED_RELEASE_MONTH 12) -set(DATAFED_RELEASE_DAY 9) +set(DATAFED_RELEASE_YEAR 2026) +set(DATAFED_RELEASE_MONTH 2) +set(DATAFED_RELEASE_DAY 24) set(DATAFED_RELEASE_HOUR 10) 
set(DATAFED_RELEASE_MINUTE 0) -set(DATAFED_COMMON_LIB_MAJOR 2) +set(DATAFED_COMMON_LIB_MAJOR 3) set(DATAFED_COMMON_LIB_MINOR 0) set(DATAFED_COMMON_LIB_PATCH 0) -set(DATAFED_COMMON_PROTOCOL_API_MAJOR 2) +set(DATAFED_COMMON_PROTOCOL_API_MAJOR 3) set(DATAFED_COMMON_PROTOCOL_API_MINOR 0) set(DATAFED_COMMON_PROTOCOL_API_PATCH 0) -set(DATAFED_CORE_MAJOR 2) +set(DATAFED_CORE_MAJOR 3) set(DATAFED_CORE_MINOR 0) set(DATAFED_CORE_PATCH 0) set(DATAFED_FOXX_MAJOR 1) -set(DATAFED_FOXX_MINOR 2) +set(DATAFED_FOXX_MINOR 3) set(DATAFED_FOXX_PATCH 0) set(DATAFED_FOXX_API_MAJOR 1) -set(DATAFED_FOXX_API_MINOR 2) +set(DATAFED_FOXX_API_MINOR 3) set(DATAFED_FOXX_API_PATCH 0) -set(DATAFED_WEB_MAJOR 2) +set(DATAFED_WEB_MAJOR 3) set(DATAFED_WEB_MINOR 0) set(DATAFED_WEB_PATCH 0) -set(DATAFED_REPO_MAJOR 2) +set(DATAFED_REPO_MAJOR 3) set(DATAFED_REPO_MINOR 0) set(DATAFED_REPO_PATCH 0) -set(DATAFED_AUTHZ_MAJOR 2) +set(DATAFED_AUTHZ_MAJOR 3) set(DATAFED_AUTHZ_MINOR 0) set(DATAFED_AUTHZ_PATCH 0) -set(DATAFED_PYTHON_CLIENT_MAJOR 4) +set(DATAFED_PYTHON_CLIENT_MAJOR 5) set(DATAFED_PYTHON_CLIENT_MINOR 0) set(DATAFED_PYTHON_CLIENT_PATCH 0) set(DATAFED_PYTHON_CLIENT_RELEASE_TYPE "") diff --git a/core/database/foxx/api/user_router.js b/core/database/foxx/api/user_router.js index c96a17803..4c4b1e031 100644 --- a/core/database/foxx/api/user_router.js +++ b/core/database/foxx/api/user_router.js @@ -316,7 +316,6 @@ router .get("/update", function (req, res) { let client = null; let result = null; - let client = null; let sub = req.queryParams.subject ? req.queryParams.subject : req.queryParams.client; try { logger.logRequestStarted({